Diffstat (limited to 'drivers')
229 files changed, 26529 insertions, 2533 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 613669cb381..ed8a39c994d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -64,6 +64,8 @@ source "drivers/mmc/Kconfig"
 
 source "drivers/mtd/Kconfig"
 
+source "drivers/mux/Kconfig"
+
 source "drivers/net/Kconfig"
 
 source "drivers/nvme/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 9eb51453e57..33f1d536cdd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_$(SPL_TPL_)INPUT) += input/
 obj-$(CONFIG_$(SPL_TPL_)LED) += led/
 obj-$(CONFIG_$(SPL_TPL_)MMC_SUPPORT) += mmc/
 obj-y += mtd/
+obj-$(CONFIG_$(SPL_)MULTIPLEXER) += mux/
 obj-$(CONFIG_$(SPL_TPL_)PCH_SUPPORT) += pch/
 obj-$(CONFIG_$(SPL_TPL_)PCI) += pci/
 obj-$(CONFIG_$(SPL_TPL_)PHY) += phy/
diff --git a/drivers/clk/altera/clk-agilex.c b/drivers/clk/altera/clk-agilex.c
index 9927ada201f..a539889d5be 100644
--- a/drivers/clk/altera/clk-agilex.c
+++ b/drivers/clk/altera/clk-agilex.c
@@ -47,8 +47,66 @@ static void clk_write_ctrl(struct socfpga_clk_platdata *plat, u32 val)
 #define MEMBUS_MAINPLL 0
 #define MEMBUS_PERPLL 1
 #define MEMBUS_TIMEOUT 1000
-#define MEMBUS_ADDR_CLKSLICE 0x27
-#define MEMBUS_CLKSLICE_SYNC_MODE_EN 0x80
+
+#define MEMBUS_CLKSLICE_REG 0x27
+#define MEMBUS_SYNTHCALFOSC_INIT_CENTERFREQ_REG 0xb3
+#define MEMBUS_SYNTHPPM_WATCHDOGTMR_VF01_REG 0xe6
+#define MEMBUS_CALCLKSLICE0_DUTY_LOCOVR_REG 0x03
+#define MEMBUS_CALCLKSLICE1_DUTY_LOCOVR_REG 0x07
+
+static const struct {
+	u32 reg;
+	u32 val;
+	u32 mask;
+} membus_pll[] = {
+	{
+		MEMBUS_CLKSLICE_REG,
+		/*
+		 * BIT[7:7]
+		 * Enable source synchronous mode
+		 */
+		BIT(7),
+		BIT(7)
+	},
+	{
+		MEMBUS_SYNTHCALFOSC_INIT_CENTERFREQ_REG,
+		/*
+		 * BIT[0:0]
+		 * Sets synthcalfosc_init_centerfreq=1 to limit overshoot
+		 * frequency during lock
+		 */
+		BIT(0),
+		BIT(0)
+	},
+	{
+		MEMBUS_SYNTHPPM_WATCHDOGTMR_VF01_REG,
+		/*
+		 * BIT[0:0]
+		 * Sets synthppm_watchdogtmr_vf01=1 to give the pll more time
+		 * to settle before lock is asserted.
+		 */
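Note: each membus_pll[] entry is applied as a masked read-modify-write, so only the bits covered by .mask ever change; .val is assumed to already sit within the mask. A minimal sketch of the update step performed by the loop further below (the helper name is hypothetical, not part of the patch):

    /* Clear the masked field, then OR in the pre-shifted value. */
    static inline u32 membus_field_update(u32 old, u32 val, u32 mask)
    {
            return (old & ~mask) | val;
    }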
+		BIT(0),
+		BIT(0)
+	},
+	{
+		MEMBUS_CALCLKSLICE0_DUTY_LOCOVR_REG,
+		/*
+		 * BIT[6:0]
+		 * Centering duty cycle for clkslice0 output
+		 */
+		0x4a,
+		GENMASK(6, 0)
+	},
+	{
+		MEMBUS_CALCLKSLICE1_DUTY_LOCOVR_REG,
+		/*
+		 * BIT[6:0]
+		 * Centering duty cycle for clkslice1 output
+		 */
+		0x4a,
+		GENMASK(6, 0)
+	},
+};
 
 static int membus_wait_for_req(struct socfpga_clk_platdata *plat, u32 pll,
 			       int timeout)
@@ -126,6 +184,20 @@ static int membus_read_pll(struct socfpga_clk_platdata *plat, u32 pll,
 	return 0;
 }
 
+static void membus_pll_configs(struct socfpga_clk_platdata *plat, u32 pll)
+{
+	int i;
+	u32 rdata;
+
+	for (i = 0; i < ARRAY_SIZE(membus_pll); i++) {
+		membus_read_pll(plat, pll, membus_pll[i].reg,
+				&rdata, MEMBUS_TIMEOUT);
+		membus_write_pll(plat, pll, membus_pll[i].reg,
+				 ((rdata & ~membus_pll[i].mask) | membus_pll[i].val),
+				 MEMBUS_TIMEOUT);
+	}
+}
+
 static u32 calc_vocalib_pll(u32 pllm, u32 pllglob)
 {
 	u32 mdiv, refclkdiv, arefclkdiv, drefclkdiv, mscnt, hscnt, vcocalib;
@@ -166,11 +238,20 @@ static void clk_basic_init(struct udevice *dev,
 {
 	struct socfpga_clk_platdata *plat = dev_get_platdata(dev);
 	u32 vcocalib;
-	u32 rdata;
 
 	if (!cfg)
 		return;
 
+#ifdef CONFIG_SPL_BUILD
+	/* Always force clock manager into boot mode before any configuration */
+	clk_write_ctrl(plat,
+		       CM_REG_READL(plat, CLKMGR_CTRL) | CLKMGR_CTRL_BOOTMODE);
+#else
+	/* Skip clock configuration in SSBL if it's not in boot mode */
+	if (!(CM_REG_READL(plat, CLKMGR_CTRL) & CLKMGR_CTRL_BOOTMODE))
+		return;
+#endif
+
 	/* Put both PLLs in bypass */
 	clk_write_bypass_mainpll(plat, CLKMGR_BYPASS_MAINPLL_ALL);
 	clk_write_bypass_perpll(plat, CLKMGR_BYPASS_PERPLL_ALL);
@@ -216,19 +297,10 @@ static void clk_basic_init(struct udevice *dev,
 	CM_REG_SETBITS(plat, CLKMGR_PERPLL_PLLGLOB,
 		       CLKMGR_PLLGLOB_PD_MASK | CLKMGR_PLLGLOB_RST_MASK);
 
-	/* Membus programming to set mainpll and perripll to
-	 * source synchronous mode
-	 */
-	membus_read_pll(plat, MEMBUS_MAINPLL, MEMBUS_ADDR_CLKSLICE, &rdata,
-			MEMBUS_TIMEOUT);
-	membus_write_pll(plat, MEMBUS_MAINPLL, MEMBUS_ADDR_CLKSLICE,
-			 (rdata | MEMBUS_CLKSLICE_SYNC_MODE_EN),
-			 MEMBUS_TIMEOUT);
-	membus_read_pll(plat, MEMBUS_PERPLL, MEMBUS_ADDR_CLKSLICE, &rdata,
-			MEMBUS_TIMEOUT);
-	membus_write_pll(plat, MEMBUS_PERPLL, MEMBUS_ADDR_CLKSLICE,
-			 (rdata | MEMBUS_CLKSLICE_SYNC_MODE_EN),
-			 MEMBUS_TIMEOUT);
+	/* Membus programming for mainpll */
+	membus_pll_configs(plat, MEMBUS_MAINPLL);
+	/* Membus programming for peripll */
+	membus_pll_configs(plat, MEMBUS_PERPLL);
 
 	cm_wait_for_lock(CLKMGR_STAT_ALLPLL_LOCKED_MASK);
@@ -533,12 +605,20 @@ static ulong socfpga_clk_get_rate(struct clk *clk)
 	case AGILEX_EMAC2_CLK:
 		return clk_get_emac_clk_hz(plat, clk->id);
 	case AGILEX_USB_CLK:
+	case AGILEX_NAND_X_CLK:
 		return clk_get_l4_mp_clk_hz(plat);
+	case AGILEX_NAND_CLK:
+		return clk_get_l4_mp_clk_hz(plat) / 4;
 	default:
 		return -ENXIO;
 	}
 }
 
+static int socfpga_clk_enable(struct clk *clk)
+{
+	return 0;
+}
+
 static int socfpga_clk_probe(struct udevice *dev)
 {
 	const struct cm_config *cm_default_cfg = cm_get_default_config();
@@ -562,6 +642,7 @@ static int socfpga_clk_ofdata_to_platdata(struct udevice *dev)
 }
 
 static struct clk_ops socfpga_clk_ops = {
+	.enable = socfpga_clk_enable,
 	.get_rate = socfpga_clk_get_rate,
 };
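Note: the no-op socfpga_clk_enable() is not dead code. U-Boot's clk_enable() is expected to return -ENOSYS when a driver provides no .enable hook (worth verifying against clk-uclass.c in your tree), which would break consumers of the new NAND clocks. A hedged consumer-side sketch (nand_clk_init() is hypothetical):

    static int nand_clk_init(struct udevice *dev)
    {
            struct clk clk;
            int ret;

            ret = clk_get_by_index(dev, 0, &clk);
            if (ret)
                    return ret;

            /* Succeeds only because the driver now provides .enable. */
            return clk_enable(&clk);
    }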
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 2453c38af1a..580b406d7bd 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_AT91_GENERIC_CLK) += clk-generic.o
 obj-$(CONFIG_AT91_UTMI) += clk-utmi.o
 obj-$(CONFIG_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
 obj-$(CONFIG_SAMA7G5) += sama7g5.o
+obj-$(CONFIG_SAM9X60) += sam9x60.o
 else
 obj-y += compat.o
 endif
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 759df93697d..5d93e6a7e52 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -24,7 +24,7 @@
 #define MASTER_PRES_MASK 0x7
 #define MASTER_PRES_MAX MASTER_PRES_MASK
 #define MASTER_DIV_SHIFT 8
-#define MASTER_DIV_MASK 0x3
+#define MASTER_DIV_MASK 0x7
 
 #define PMC_MCR 0x30
 #define PMC_MCR_ID_MSK GENMASK(3, 0)
diff --git a/drivers/clk/at91/compat.c b/drivers/clk/at91/compat.c
index 8cf6254046d..9563285674b 100644
--- a/drivers/clk/at91/compat.c
+++ b/drivers/clk/at91/compat.c
@@ -9,6 +9,7 @@
 #include <common.h>
 #include <clk-uclass.h>
 #include <dm.h>
+#include <dm/device_compat.h>
 #include <dm/lists.h>
 #include <dm/util.h>
 #include <mach/at91_pmc.h>
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index a6a714fd220..f07f535e492 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -30,7 +30,7 @@ extern const struct clk_master_layout at91sam9x5_master_layout;
 
 struct clk_master_characteristics {
 	struct clk_range output;
-	u32 divisors[4];
+	u32 divisors[5];
 	u8 have_div3_pres;
 };
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
new file mode 100644
index 00000000000..c3235f565d7
--- /dev/null
+++ b/drivers/clk/at91/sam9x60.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ *
+ * Based on sam9x60.c on Linux.
+ */
+
+#include <common.h>
+#include <clk-uclass.h>
+#include <dm.h>
+#include <dt-bindings/clk/at91.h>
+#include <linux/clk-provider.h>
+
+#include "pmc.h"
+
+/**
+ * Clock identifiers to be used in conjunction with macros like
+ * AT91_TO_CLK_ID()
+ *
+ * @ID_MD_SLCK: MD slow clock identifier
+ * @ID_TD_SLCK: TD slow clock identifier
+ * @ID_MAIN_XTAL: Main Xtal clock identifier
+ * @ID_MAIN_RC: Main RC clock identifier
+ * @ID_MAIN_RC_OSC: Main RC Oscillator clock identifier
+ * @ID_MAIN_OSC: Main Oscillator clock identifier
+ * @ID_MAINCK: MAINCK clock identifier
+ * @ID_PLL_U_FRAC: UPLL fractional clock identifier
+ * @ID_PLL_U_DIV: UPLL divider clock identifier
+ * @ID_PLL_A_FRAC: APLL fractional clock identifier
+ * @ID_PLL_A_DIV: APLL divider clock identifier
+ *
+ * @ID_MCK: MCK clock identifier
+ *
+ * @ID_UTMI: UTMI clock identifier
+ *
+ * @ID_PROG0: Programmable 0 clock identifier
+ * @ID_PROG1: Programmable 1 clock identifier
+ *
+ * @ID_PCK0: PCK0 system clock identifier
+ * @ID_PCK1: PCK1 system clock identifier
+ * @ID_DDR: DDR system clock identifier
+ * @ID_QSPI: QSPI system clock identifier
+ *
+ * Note: if changing the values of these enums please sync them with
+ * the device tree
+ */
+enum pmc_clk_ids {
+	ID_MD_SLCK = 0,
+	ID_TD_SLCK = 1,
+	ID_MAIN_XTAL = 2,
+	ID_MAIN_RC = 3,
+	ID_MAIN_RC_OSC = 4,
+	ID_MAIN_OSC = 5,
+	ID_MAINCK = 6,
+
+	ID_PLL_U_FRAC = 7,
+	ID_PLL_U_DIV = 8,
+	ID_PLL_A_FRAC = 9,
+	ID_PLL_A_DIV = 10,
+
+	ID_MCK = 11,
+
+	ID_UTMI = 12,
+
+	ID_PROG0 = 13,
+	ID_PROG1 = 14,
+
+	ID_PCK0 = 15,
+	ID_PCK1 = 16,
+
+	ID_DDR = 17,
+	ID_QSPI = 18,
+
+	ID_MAX,
+};
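Note: consumers address these clocks through IDs namespaced by clock type. Assuming the usual AT91_TO_CLK_ID() encoding from dt-bindings/clk/at91.h (clock type in the upper byte, identifier in the lower byte), a lookup might read as follows; mck_rate_demo() is a hypothetical consumer, not part of the patch:

    /* Sketch: resolve MCK through the clock subsystem. */
    static void mck_rate_demo(void)
    {
            struct clk *mck;

            if (!clk_get_by_id(AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MCK), &mck))
                    printf("mck: %lu Hz\n", clk_get_rate(mck));
    }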
+/**
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+	PLL_TYPE_FRAC,
+	PLL_TYPE_DIV,
+};
+
+/* Clock names used as parents for multiple clocks. */
+static const char *clk_names[] = {
+	[ID_MAIN_RC_OSC] = "main_rc_osc",
+	[ID_MAIN_OSC] = "main_osc",
+	[ID_MAINCK] = "mainck",
+	[ID_PLL_U_DIV] = "upll_divpmcck",
+	[ID_PLL_A_DIV] = "plla_divpmcck",
+	[ID_MCK] = "mck",
+};
+
+/* Fractional PLL output range. */
+static const struct clk_range plla_outputs[] = {
+	{ .min = 2343750, .max = 1200000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+	{ .min = 300000000, .max = 500000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics apll_characteristics = {
+	.input = { .min = 12000000, .max = 48000000 },
+	.num_output = ARRAY_SIZE(plla_outputs),
+	.output = plla_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+	.input = { .min = 12000000, .max = 48000000 },
+	.num_output = ARRAY_SIZE(upll_outputs),
+	.output = upll_outputs,
+	.upll = true,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+	.mul_mask = GENMASK(31, 24),
+	.frac_mask = GENMASK(21, 0),
+	.mul_shift = 24,
+	.frac_shift = 0,
+};
+
+/* Layout for DIV PLLs. */
+static const struct clk_pll_layout pll_layout_div = {
+	.div_mask = GENMASK(7, 0),
+	.endiv_mask = BIT(29),
+	.div_shift = 0,
+	.endiv_shift = 29,
+};
+
+/* MCK characteristics. */
+static const struct clk_master_characteristics mck_characteristics = {
+	.output = { .min = 140000000, .max = 200000000 },
+	.divisors = { 1, 2, 4, 3 },
+	.have_div3_pres = 1,
+};
+
+/* MCK layout. */
+static const struct clk_master_layout mck_layout = {
+	.mask = 0x373,
+	.pres_shift = 4,
+	.offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+	.pres_mask = 0xff,
+	.pres_shift = 8,
+	.css_mask = 0x1f,
+	.have_slck_mck = 0,
+	.is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout pcr_layout = {
+	.offset = 0x88,
+	.cmd = BIT(31),
+	.gckcss_mask = GENMASK(12, 8),
+	.pid_mask = GENMASK(6, 0),
+};
+
+/**
+ * PLL clocks description
+ * @n: clock name
+ * @p: clock parent
+ * @l: clock layout
+ * @c: clock characteristics
+ * @t: clock type
+ * @f: true if clock is fixed and not changeable by driver
+ * @id: clock id corresponding to PLL driver
+ * @cid: clock id corresponding to clock subsystem
+ */
+static const struct {
+	const char *n;
+	const char *p;
+	const struct clk_pll_layout *l;
+	const struct clk_pll_characteristics *c;
+	u8 t;
+	u8 f;
+	u8 id;
+	u8 cid;
+} sam9x60_plls[] = {
+	{
+		.n = "plla_fracck",
+		.p = "mainck",
+		.l = &pll_layout_frac,
+		.c = &apll_characteristics,
+		.t = PLL_TYPE_FRAC,
+		.f = 1,
+		.id = 0,
+		.cid = ID_PLL_A_FRAC,
+	},
+
+	{
+		.n = "plla_divpmcck",
+		.p = "plla_fracck",
+		.l = &pll_layout_div,
+		.c = &apll_characteristics,
+		.t = PLL_TYPE_DIV,
+		.f = 1,
+		.id = 0,
+		.cid = ID_PLL_A_DIV,
+	},
+
+	{
+		.n = "upll_fracck",
+		.p = "main_osc",
+		.l = &pll_layout_frac,
+		.c = &upll_characteristics,
+		.t = PLL_TYPE_FRAC,
+		.f = 1,
+		.id = 1,
+		.cid = ID_PLL_U_FRAC,
+	},
+
+	{
+		.n = "upll_divpmcck",
+		.p = "upll_fracck",
+		.l = &pll_layout_div,
+		.c = &upll_characteristics,
+		.t = PLL_TYPE_DIV,
+		.f = 1,
+		.id = 1,
+		.cid = ID_PLL_U_DIV,
+	},
+};
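Note: the fractional stage described by pll_layout_frac follows the usual sam9x60 PLL relation rate = parent × (mul + 1 + frac / 2^22); this formula comes from the SoC documentation, not from this patch, so treat it as an assumption. A sketch of that computation:

    /* Sketch: sam9x60 fractional PLL output under the assumed formula. */
    static unsigned long frac_pll_rate(unsigned long parent, u32 mul, u32 frac)
    {
            return (unsigned long)((u64)parent * (mul + 1) +
                                   ((u64)parent * frac >> 22));
    }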
+/**
+ * Programmable clock description
+ * @n: clock name
+ * @cid: clock id corresponding to clock subsystem
+ */
+static const struct {
+	const char *n;
+	u8 cid;
+} sam9x60_prog[] = {
+	{ .n = "prog0", .cid = ID_PROG0, },
+	{ .n = "prog1", .cid = ID_PROG1, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sam9x60_prog_mux_table[] = { 0, 1, 2, 3, 4, 5, };
+
+/**
+ * System clock description
+ * @n: clock name
+ * @p: parent clock name
+ * @id: clock id corresponding to system clock driver
+ * @cid: clock id corresponding to clock subsystem
+ */
+static const struct {
+	const char *n;
+	const char *p;
+	u8 id;
+	u8 cid;
+} sam9x60_systemck[] = {
+	{ .n = "ddrck", .p = "mck", .id = 2, .cid = ID_DDR, },
+	{ .n = "pck0", .p = "prog0", .id = 8, .cid = ID_PCK0, },
+	{ .n = "pck1", .p = "prog1", .id = 9, .cid = ID_PCK1, },
+	{ .n = "qspick", .p = "mck", .id = 19, .cid = ID_QSPI, },
+};
+
+/**
+ * Peripheral clock description
+ * @n: clock name
+ * @id: clock id
+ */
+static const struct {
+	const char *n;
+	u8 id;
+} sam9x60_periphck[] = {
+	{ .n = "pioA_clk", .id = 2, },
+	{ .n = "pioB_clk", .id = 3, },
+	{ .n = "pioC_clk", .id = 4, },
+	{ .n = "flex0_clk", .id = 5, },
+	{ .n = "flex1_clk", .id = 6, },
+	{ .n = "flex2_clk", .id = 7, },
+	{ .n = "flex3_clk", .id = 8, },
+	{ .n = "flex6_clk", .id = 9, },
+	{ .n = "flex7_clk", .id = 10, },
+	{ .n = "flex8_clk", .id = 11, },
+	{ .n = "sdmmc0_clk", .id = 12, },
+	{ .n = "flex4_clk", .id = 13, },
+	{ .n = "flex5_clk", .id = 14, },
+	{ .n = "flex9_clk", .id = 15, },
+	{ .n = "flex10_clk", .id = 16, },
+	{ .n = "tcb0_clk", .id = 17, },
+	{ .n = "pwm_clk", .id = 18, },
+	{ .n = "adc_clk", .id = 19, },
+	{ .n = "dma0_clk", .id = 20, },
+	{ .n = "matrix_clk", .id = 21, },
+	{ .n = "uhphs_clk", .id = 22, },
+	{ .n = "udphs_clk", .id = 23, },
+	{ .n = "macb0_clk", .id = 24, },
+	{ .n = "lcd_clk", .id = 25, },
+	{ .n = "sdmmc1_clk", .id = 26, },
+	{ .n = "macb1_clk", .id = 27, },
+	{ .n = "ssc_clk", .id = 28, },
+	{ .n = "can0_clk", .id = 29, },
+	{ .n = "can1_clk", .id = 30, },
+	{ .n = "flex11_clk", .id = 32, },
+	{ .n = "flex12_clk", .id = 33, },
+	{ .n = "i2s_clk", .id = 34, },
+	{ .n = "qspi_clk", .id = 35, },
+	{ .n = "gfx2d_clk", .id = 36, },
+	{ .n = "pit64b_clk", .id = 37, },
+	{ .n = "trng_clk", .id = 38, },
+	{ .n = "aes_clk", .id = 39, },
+	{ .n = "tdes_clk", .id = 40, },
+	{ .n = "sha_clk", .id = 41, },
+	{ .n = "classd_clk", .id = 42, },
+	{ .n = "isi_clk", .id = 43, },
+	{ .n = "pioD_clk", .id = 44, },
+	{ .n = "tcb1_clk", .id = 45, },
+	{ .n = "dbgu_clk", .id = 47, },
+	{ .n = "mpddr_clk", .id = 49, },
+};
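Note: all of the peripheral clocks above are gated through the single PCR register described earlier by pcr_layout (offset 0x88, command bit BIT(31), PID in GENMASK(6, 0)): the driver selects a peripheral by writing its PID with CMD=0, reads the current state back, then writes it with CMD=1 and the enable bit set. A rough sketch; the enable-bit position is an assumption, not taken from this patch:

    #define PCR_CMD	BIT(31)
    #define PCR_EN	BIT(28)	/* assumed enable-bit position */

    /* Sketch: enable one peripheral clock via the PMC PCR. */
    static void pcr_enable(void __iomem *pmc, u32 pid)
    {
            u32 val;

            writel(pid & GENMASK(6, 0), pmc + 0x88);	/* CMD=0: latch PID */
            val = readl(pmc + 0x88);
            writel(val | PCR_CMD | PCR_EN, pmc + 0x88);	/* CMD=1: write EN */
    }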
"classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, }, + { .n = "tcb1_gclk", .id = 45, }, + { .n = "dbgu_gclk", .id = 47, }, +}; + +#define prepare_mux_table(_allocs, _index, _dst, _src, _num, _label) \ + do { \ + int _i; \ + (_dst) = kzalloc(sizeof(*(_dst)) * (_num), GFP_KERNEL); \ + if (!(_dst)) { \ + ret = -ENOMEM; \ + goto _label; \ + } \ + (_allocs)[(_index)++] = (_dst); \ + for (_i = 0; _i < (_num); _i++) \ + (_dst)[_i] = (_src)[_i]; \ + } while (0) + +static int sam9x60_clk_probe(struct udevice *dev) +{ + void __iomem *base = (void *)devfdt_get_addr_ptr(dev); + unsigned int *clkmuxallocs[64], *muxallocs[64]; + const char *p[10]; + unsigned int cm[10], m[10], *tmpclkmux, *tmpmux; + struct clk clk, *c; + bool main_osc_bypass; + int ret, muxallocindex = 0, clkmuxallocindex = 0, i; + static const struct clk_range r = { 0, 0 }; + + if (!base) + return -EINVAL; + + memset(muxallocs, 0, ARRAY_SIZE(muxallocs)); + memset(clkmuxallocs, 0, ARRAY_SIZE(clkmuxallocs)); + + ret = clk_get_by_index(dev, 0, &clk); + if (ret) + return ret; + + ret = clk_get_by_id(clk.id, &c); + if (ret) + return ret; + + clk_names[ID_TD_SLCK] = kmemdup(clk_hw_get_name(c), + strlen(clk_hw_get_name(c)) + 1, + GFP_KERNEL); + if (!clk_names[ID_TD_SLCK]) + return -ENOMEM; + + ret = clk_get_by_index(dev, 1, &clk); + if (ret) + return ret; + + ret = clk_get_by_id(clk.id, &c); + if (ret) + return ret; + + clk_names[ID_MD_SLCK] = kmemdup(clk_hw_get_name(c), + strlen(clk_hw_get_name(c)) + 1, + GFP_KERNEL); + if (!clk_names[ID_MD_SLCK]) + return -ENOMEM; + + ret = clk_get_by_index(dev, 2, &clk); + if (ret) + return ret; + + clk_names[ID_MAIN_XTAL] = kmemdup(clk_hw_get_name(&clk), + strlen(clk_hw_get_name(&clk)) + 1, + GFP_KERNEL); + if (!clk_names[ID_MAIN_XTAL]) + return -ENOMEM; + + ret = clk_get_by_index(dev, 3, &clk); + if (ret) + goto fail; + + clk_names[ID_MAIN_RC] = kmemdup(clk_hw_get_name(&clk), + strlen(clk_hw_get_name(&clk)) + 1, + GFP_KERNEL); + if (ret) + goto fail; + + main_osc_bypass = dev_read_bool(dev, "atmel,main-osc-bypass"); + + /* Register main rc oscillator. */ + c = at91_clk_main_rc(base, clk_names[ID_MAIN_RC_OSC], + clk_names[ID_MAIN_RC]); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAIN_RC_OSC), c); + + /* Register main oscillator. */ + c = at91_clk_main_osc(base, clk_names[ID_MAIN_OSC], + clk_names[ID_MAIN_XTAL], main_osc_bypass); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAIN_OSC), c); + + /* Register mainck. */ + p[0] = clk_names[ID_MAIN_RC_OSC]; + p[1] = clk_names[ID_MAIN_OSC]; + cm[0] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAIN_RC_OSC); + cm[1] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAIN_OSC); + prepare_mux_table(clkmuxallocs, clkmuxallocindex, tmpclkmux, cm, 2, + fail); + c = at91_clk_sam9x5_main(base, clk_names[ID_MAINCK], p, + 2, tmpclkmux, PMC_TYPE_CORE); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAINCK), c); + + /* Register PLL fracs clocks. */ + for (i = 0; i < ARRAY_SIZE(sam9x60_plls); i++) { + if (sam9x60_plls[i].t != PLL_TYPE_FRAC) + continue; + + c = sam9x60_clk_register_frac_pll(base, sam9x60_plls[i].n, + sam9x60_plls[i].p, + sam9x60_plls[i].id, + sam9x60_plls[i].c, + sam9x60_plls[i].l, + sam9x60_plls[i].f); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, sam9x60_plls[i].cid), c); + } + + /* Register PLL div clocks. 
*/ + for (i = 0; i < ARRAY_SIZE(sam9x60_plls); i++) { + if (sam9x60_plls[i].t != PLL_TYPE_DIV) + continue; + + c = sam9x60_clk_register_div_pll(base, sam9x60_plls[i].n, + sam9x60_plls[i].p, + sam9x60_plls[i].id, + sam9x60_plls[i].c, + sam9x60_plls[i].l, + sam9x60_plls[i].f); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, sam9x60_plls[i].cid), c); + } + + /* Register MCK clock. */ + p[0] = clk_names[ID_MD_SLCK]; + p[1] = clk_names[ID_MAINCK]; + p[2] = clk_names[ID_PLL_A_DIV]; + p[3] = clk_names[ID_PLL_U_DIV]; + cm[0] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MD_SLCK); + cm[1] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAINCK); + cm[2] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_A_DIV); + cm[3] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_U_DIV); + prepare_mux_table(clkmuxallocs, clkmuxallocindex, tmpclkmux, cm, 4, + fail); + c = at91_clk_register_master(base, clk_names[ID_MCK], p, 4, &mck_layout, + &mck_characteristics, tmpclkmux); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MCK), c); + + /* Register programmable clocks. */ + p[0] = clk_names[ID_MD_SLCK]; + p[1] = clk_names[ID_TD_SLCK]; + p[2] = clk_names[ID_MAINCK]; + p[3] = clk_names[ID_MCK]; + p[4] = clk_names[ID_PLL_A_DIV]; + p[5] = clk_names[ID_PLL_U_DIV]; + cm[0] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MD_SLCK); + cm[1] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_TD_SLCK); + cm[2] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAINCK); + cm[3] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MCK); + cm[4] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_A_DIV); + cm[5] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_U_DIV); + for (i = 0; i < ARRAY_SIZE(sam9x60_prog); i++) { + prepare_mux_table(clkmuxallocs, clkmuxallocindex, tmpclkmux, cm, + 6, fail); + + c = at91_clk_register_programmable(base, sam9x60_prog[i].n, p, + 10, i, &programmable_layout, + tmpclkmux, + sam9x60_prog_mux_table); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_CORE, sam9x60_prog[i].cid), c); + } + + /* System clocks. */ + for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) { + c = at91_clk_register_system(base, sam9x60_systemck[i].n, + sam9x60_systemck[i].p, + sam9x60_systemck[i].id); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_SYSTEM, sam9x60_systemck[i].cid), + c); + } + + /* Peripheral clocks. */ + for (i = 0; i < ARRAY_SIZE(sam9x60_periphck); i++) { + c = at91_clk_register_sam9x5_peripheral(base, &pcr_layout, + sam9x60_periphck[i].n, + clk_names[ID_MCK], + sam9x60_periphck[i].id, + &r); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_PERIPHERAL, + sam9x60_periphck[i].id), c); + } + + /* Generic clocks. 
*/ + p[0] = clk_names[ID_MD_SLCK]; + p[1] = clk_names[ID_TD_SLCK]; + p[2] = clk_names[ID_MAINCK]; + p[3] = clk_names[ID_MCK]; + p[4] = clk_names[ID_PLL_A_DIV]; + p[5] = clk_names[ID_PLL_U_DIV]; + m[0] = 0; + m[1] = 1; + m[2] = 2; + m[3] = 3; + m[4] = 4; + m[5] = 5; + cm[0] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MD_SLCK); + cm[1] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_TD_SLCK); + cm[2] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MAINCK); + cm[3] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_MCK); + cm[4] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_A_DIV); + cm[5] = AT91_TO_CLK_ID(PMC_TYPE_CORE, ID_PLL_U_DIV); + for (i = 0; i < ARRAY_SIZE(sam9x60_gck); i++) { + prepare_mux_table(clkmuxallocs, clkmuxallocindex, tmpclkmux, cm, + 6, fail); + prepare_mux_table(muxallocs, muxallocindex, tmpmux, m, + 6, fail); + + c = at91_clk_register_generic(base, &pcr_layout, + sam9x60_gck[i].n, p, tmpclkmux, + tmpmux, 6, sam9x60_gck[i].id, + &sam9x60_gck[i].r); + if (IS_ERR(c)) { + ret = PTR_ERR(c); + goto fail; + } + clk_dm(AT91_TO_CLK_ID(PMC_TYPE_GCK, sam9x60_gck[i].id), c); + } + + return 0; + +fail: + for (i = 0; i < ARRAY_SIZE(muxallocs); i++) + kfree(muxallocs[i]); + + for (i = 0; i < ARRAY_SIZE(clkmuxallocs); i++) + kfree(clkmuxallocs[i]); + + return ret; +} + +static const struct udevice_id sam9x60_clk_ids[] = { + { .compatible = "microchip,sam9x60-pmc" }, + { /* Sentinel. */ }, +}; + +U_BOOT_DRIVER(at91_sam9x60_pmc) = { + .name = "at91-sam9x60-pmc", + .id = UCLASS_CLK, + .of_match = sam9x60_clk_ids, + .ops = &at91_clk_ops, + .probe = sam9x60_clk_probe, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index b96937673be..c0d9271966d 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -189,13 +189,13 @@ static const struct clk_pll_layout pll_layout_divio = { /* MCK0 characteristics. */ static const struct clk_master_characteristics mck0_characteristics = { .output = { .min = 140000000, .max = 200000000 }, - .divisors = { 1, 2, 4, 3 }, + .divisors = { 1, 2, 4, 3, 5 }, .have_div3_pres = 1, }; /* MCK0 layout. */ static const struct clk_master_layout mck0_layout = { - .mask = 0x373, + .mask = 0x773, .pres_shift = 4, .offset = 0x28, }; diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 7a5ee7a45fc..f1becd20d8b 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -150,32 +150,8 @@ static int clk_mux_set_parent(struct clk *clk, struct clk *parent) return 0; } -static ulong clk_mux_get_rate(struct clk *clk) -{ - struct clk_mux *mux = to_clk_mux(clk_dev_binded(clk) ? 
- dev_get_clk_ptr(clk->dev) : clk); - struct udevice *parent; - struct clk *pclk; - int err, index; - - index = clk_mux_get_parent(clk); - if (index >= mux->num_parents) - return -EFAULT; - - err = uclass_get_device_by_name(UCLASS_CLK, mux->parent_names[index], - &parent); - if (err) - return err; - - pclk = dev_get_clk_ptr(parent); - if (!pclk) - return -ENODEV; - - return clk_get_rate(pclk); -} - const struct clk_ops clk_mux_ops = { - .get_rate = clk_mux_get_rate, + .get_rate = clk_generic_get_rate, .set_parent = clk_mux_set_parent, }; diff --git a/drivers/clk/clk-uclass.c b/drivers/clk/clk-uclass.c index 31c5997aead..ac954a34d27 100644 --- a/drivers/clk/clk-uclass.c +++ b/drivers/clk/clk-uclass.c @@ -38,8 +38,7 @@ int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells, { int ret; - ret = device_get_by_driver_info((struct driver_info *)cells->node, - &clk->dev); + ret = device_get_by_driver_info_idx(cells->idx, &clk->dev); if (ret) return ret; clk->id = cells->arg[0]; diff --git a/drivers/clk/clk_fixed_rate.c b/drivers/clk/clk_fixed_rate.c index 55e1f8caa52..f86b4a0e924 100644 --- a/drivers/clk/clk_fixed_rate.c +++ b/drivers/clk/clk_fixed_rate.c @@ -46,8 +46,8 @@ static const struct udevice_id clk_fixed_rate_match[] = { { /* sentinel */ } }; -U_BOOT_DRIVER(clk_fixed_rate) = { - .name = "fixed_rate_clock", +U_BOOT_DRIVER(fixed_clock) = { + .name = "fixed_clock", .id = UCLASS_CLK, .of_match = clk_fixed_rate_match, .ofdata_to_platdata = clk_fixed_rate_ofdata_to_platdata, diff --git a/drivers/clk/clk_sandbox.c b/drivers/clk/clk_sandbox.c index 768fbb7c520..0ff1b496338 100644 --- a/drivers/clk/clk_sandbox.c +++ b/drivers/clk/clk_sandbox.c @@ -124,8 +124,8 @@ static const struct udevice_id sandbox_clk_ids[] = { { } }; -U_BOOT_DRIVER(clk_sandbox) = { - .name = "clk_sandbox", +U_BOOT_DRIVER(sandbox_clk) = { + .name = "sandbox_clk", .id = UCLASS_CLK, .of_match = sandbox_clk_ids, .ops = &sandbox_clk_ops, diff --git a/drivers/clk/kendryte/clk.c b/drivers/clk/kendryte/clk.c index bb196961afd..4b959401a63 100644 --- a/drivers/clk/kendryte/clk.c +++ b/drivers/clk/kendryte/clk.c @@ -471,8 +471,7 @@ cleanup_gate: cleanup_div: free(div); cleanup_mux: - if (mux) - free(mux); + free(mux); return comp; } diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 284e2138b39..0c8b9eb47de 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -55,6 +55,24 @@ config CLK_R8A774A1 help Enable this to support the clocks on Renesas R8A774A1 SoC. +config CLK_R8A774B1 + bool "Renesas R8A774B1 clock driver" + depends on CLK_RCAR_GEN3 + help + Enable this to support the clocks on Renesas R8A774B1 SoC. + +config CLK_R8A774C0 + bool "Renesas R8A774C0 clock driver" + depends on CLK_RCAR_GEN3 + help + Enable this to support the clocks on Renesas R8A774C0 SoC. + +config CLK_R8A774E1 + bool "Renesas R8A774E1 clock driver" + depends on CLK_RCAR_GEN3 + help + Enable this to support the clocks on Renesas R8A774E1 SoC. 
+ config CLK_R8A7795 bool "Renesas R8A7795 clock driver" depends on CLK_RCAR_GEN3 diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index dd599b757e8..ed1a1252c40 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -1,6 +1,9 @@ obj-$(CONFIG_CLK_RENESAS) += renesas-cpg-mssr.o obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774B1) += r8a774b1-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774E1) += r8a774e1-cpg-mssr.o obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o obj-$(CONFIG_CLK_R8A7791) += r8a7791-cpg-mssr.o obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index 8935667736f..6997054b304 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -41,6 +41,7 @@ enum clk_ids { CLK_S2, CLK_S3, CLK_SDSRC, + CLK_RPCSRC, CLK_RINT, /* Module Clocks */ @@ -67,6 +68,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".rpcsrc", CLK_RPCSRC, CLK_PLL1, 2, 1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -97,6 +99,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] = { DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, CLK_SDSRC, 0x078), DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, CLK_SDSRC, 0x268), DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, CLK_SDSRC, 0x26c), + DEF_GEN3_RPC("rpc", R8A774A1_CLK_RPC, CLK_RPCSRC, 0x238), DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), @@ -200,6 +203,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] = { DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2), DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), + DEF_MOD("rpc", 917, R8A774A1_CLK_RPC), DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6), DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP), diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c new file mode 100644 index 00000000000..7b6947b5b90 --- /dev/null +++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774b1 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2020 Renesas Electronics Corp. 
+ * + * Based on r8a7796-cpg-mssr.c + * + * Copyright (C) 2016 Glider bvba + */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm.h> + +#include <dt-bindings/clock/r8a774b1-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774B1_CLK_CANFD, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL3, + CLK_PLL4, + CLK_PLL1_DIV2, + CLK_PLL1_DIV4, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RINT, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774b1_core_clks[] = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + + /* Core Clock Outputs */ + DEF_GEN3_Z("z", R8A774B1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + DEF_FIXED("ztr", R8A774B1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED("ztrd2", R8A774B1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), + DEF_FIXED("zt", R8A774B1_CLK_ZT, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED("zx", R8A774B1_CLK_ZX, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED("s0d1", R8A774B1_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A774B1_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A774B1_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A774B1_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A774B1_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A774B1_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A774B1_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s1d2", R8A774B1_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774B1_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774B1_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774B1_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774B1_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774B1_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774B1_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774B1_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, CLK_SDSRC, 0x26c), + + DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A774B1_CLK_CPEX, CLK_EXTAL, 2, 1), + + DEF_DIV6P1("canfd", R8A774B1_CLK_CANFD, CLK_PLL1_DIV4, 0x244), + DEF_DIV6P1("csi0", R8A774B1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A774B1_CLK_MSO, CLK_PLL1_DIV4, 0x014), + DEF_DIV6P1("hdmi", R8A774B1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), + + DEF_GEN3_OSC("osc", R8A774B1_CLK_OSC, CLK_EXTAL, 8), + + DEF_BASE("r", R8A774B1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), +}; + +static const struct mssr_mod_clk r8a774b1_mod_clks[] 
= { + DEF_MOD("tmu4", 121, R8A774B1_CLK_S0D6), + DEF_MOD("tmu3", 122, R8A774B1_CLK_S3D2), + DEF_MOD("tmu2", 123, R8A774B1_CLK_S3D2), + DEF_MOD("tmu1", 124, R8A774B1_CLK_S3D2), + DEF_MOD("tmu0", 125, R8A774B1_CLK_CP), + DEF_MOD("fdp1-0", 119, R8A774B1_CLK_S0D1), + DEF_MOD("scif5", 202, R8A774B1_CLK_S3D4), + DEF_MOD("scif4", 203, R8A774B1_CLK_S3D4), + DEF_MOD("scif3", 204, R8A774B1_CLK_S3D4), + DEF_MOD("scif1", 206, R8A774B1_CLK_S3D4), + DEF_MOD("scif0", 207, R8A774B1_CLK_S3D4), + DEF_MOD("msiof3", 208, R8A774B1_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774B1_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774B1_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774B1_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774B1_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774B1_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A774B1_CLK_S0D3), + DEF_MOD("cmt3", 300, R8A774B1_CLK_R), + DEF_MOD("cmt2", 301, R8A774B1_CLK_R), + DEF_MOD("cmt1", 302, R8A774B1_CLK_R), + DEF_MOD("cmt0", 303, R8A774B1_CLK_R), + DEF_MOD("tpu0", 304, R8A774B1_CLK_S3D4), + DEF_MOD("scif2", 310, R8A774B1_CLK_S3D4), + DEF_MOD("sdif3", 311, R8A774B1_CLK_SD3), + DEF_MOD("sdif2", 312, R8A774B1_CLK_SD2), + DEF_MOD("sdif1", 313, R8A774B1_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774B1_CLK_SD0), + DEF_MOD("pcie1", 318, R8A774B1_CLK_S3D1), + DEF_MOD("pcie0", 319, R8A774B1_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774B1_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, R8A774B1_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774B1_CLK_S3D1), + DEF_MOD("rwdt", 402, R8A774B1_CLK_R), + DEF_MOD("intc-ex", 407, R8A774B1_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774B1_CLK_S0D3), + DEF_MOD("audmac1", 501, R8A774B1_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A774B1_CLK_S1D2), + DEF_MOD("hscif4", 516, R8A774B1_CLK_S3D1), + DEF_MOD("hscif3", 517, R8A774B1_CLK_S3D1), + DEF_MOD("hscif2", 518, R8A774B1_CLK_S3D1), + DEF_MOD("hscif1", 519, R8A774B1_CLK_S3D1), + DEF_MOD("hscif0", 520, R8A774B1_CLK_S3D1), + DEF_MOD("thermal", 522, R8A774B1_CLK_CP), + DEF_MOD("pwm", 523, R8A774B1_CLK_S0D12), + DEF_MOD("fcpvd1", 602, R8A774B1_CLK_S0D2), + DEF_MOD("fcpvd0", 603, R8A774B1_CLK_S0D2), + DEF_MOD("fcpvb0", 607, R8A774B1_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774B1_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774B1_CLK_S0D1), + DEF_MOD("fcpcs", 619, R8A774B1_CLK_S0D2), + DEF_MOD("vspd1", 622, R8A774B1_CLK_S0D2), + DEF_MOD("vspd0", 623, R8A774B1_CLK_S0D2), + DEF_MOD("vspb", 626, R8A774B1_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774B1_CLK_S0D1), + DEF_MOD("ehci1", 702, R8A774B1_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A774B1_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774B1_CLK_S3D2), + DEF_MOD("csi20", 714, R8A774B1_CLK_CSI0), + DEF_MOD("csi40", 716, R8A774B1_CLK_CSI0), + DEF_MOD("du3", 721, R8A774B1_CLK_S2D1), + DEF_MOD("du1", 723, R8A774B1_CLK_S2D1), + DEF_MOD("du0", 724, R8A774B1_CLK_S2D1), + DEF_MOD("lvds", 727, R8A774B1_CLK_S2D1), + DEF_MOD("hdmi0", 729, R8A774B1_CLK_HDMI), + DEF_MOD("vin7", 804, R8A774B1_CLK_S0D2), + DEF_MOD("vin6", 805, R8A774B1_CLK_S0D2), + DEF_MOD("vin5", 806, R8A774B1_CLK_S0D2), + DEF_MOD("vin4", 807, R8A774B1_CLK_S0D2), + DEF_MOD("vin3", 808, R8A774B1_CLK_S0D2), + DEF_MOD("vin2", 809, R8A774B1_CLK_S0D2), + DEF_MOD("vin1", 810, R8A774B1_CLK_S0D2), + DEF_MOD("vin0", 811, R8A774B1_CLK_S0D2), + DEF_MOD("etheravb", 812, R8A774B1_CLK_S0D6), + DEF_MOD("sata0", 815, R8A774B1_CLK_S3D2), + DEF_MOD("gpio7", 905, R8A774B1_CLK_S3D4), + DEF_MOD("gpio6", 906, R8A774B1_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774B1_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774B1_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774B1_CLK_S3D4), + DEF_MOD("gpio2", 910, 
R8A774B1_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774B1_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A774B1_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A774B1_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A774B1_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774B1_CLK_S3D4), + DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6), + DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6), + DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP), + DEF_MOD("i2c4", 927, R8A774B1_CLK_S0D6), + DEF_MOD("i2c3", 928, R8A774B1_CLK_S0D6), + DEF_MOD("i2c2", 929, R8A774B1_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774B1_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774B1_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774B1_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A774B1_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL3 PLL4 OSC + * 14 13 19 17 (MHz) + *----------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 /16 + * 0 0 1 0 Prohibited setting + * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x106 x120 /19 + * 0 1 1 0 Prohibited setting + * 0 1 1 1 20 x 1 x150 x160 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x84 x96 /24 + * 1 0 1 0 Prohibited setting + * 1 0 1 1 25 x 1 x120 x128 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 /32 + * 1 1 1 0 Prohibited setting + * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 /32 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ + (((md) & BIT(13)) >> 11) | \ + (((md) & BIT(19)) >> 18) | \ + (((md) & BIT(17)) >> 17)) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] = { + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, +}; + +/* RMSTPCR[0-11] is not present on RZ/G2N */ +static const struct mstp_stop_table r8a774b1_mstp_table[] = { + { 0x00200000, 0x0, 0x0, 0 }, + { 0xFFFFFFFF, 
0x0, 0x0, 0 }, + { 0x340E2FDC, 0x2040, 0x0, 0 }, + { 0xFFFFFFDF, 0x400, 0x0, 0 }, + { 0x80000184, 0x180, 0x0, 0 }, + { 0xC3FFFFFF, 0x0, 0x0, 0 }, + { 0xFFFFFFFF, 0x0, 0x0, 0 }, + { 0xFFFFFFFF, 0x0, 0x0, 0 }, + { 0x01F1FFF7, 0x0, 0x0, 0 }, + { 0xFFFFFFFE, 0x0, 0x0, 0 }, + { 0xFFFEFFE0, 0x0, 0x0, 0 }, + { 0x000000B7, 0x0, 0x0, 0 }, +}; + +static const void *r8a774b1_get_pll_config(const u32 cpg_mode) +{ + return &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; +} + +static const struct cpg_mssr_info r8a774b1_cpg_mssr_info = { + .core_clk = r8a774b1_core_clks, + .core_clk_size = ARRAY_SIZE(r8a774b1_core_clks), + .mod_clk = r8a774b1_mod_clks, + .mod_clk_size = ARRAY_SIZE(r8a774b1_mod_clks), + .mstp_table = r8a774b1_mstp_table, + .mstp_table_size = ARRAY_SIZE(r8a774b1_mstp_table), + .reset_node = "renesas,r8a774b1-rst", + .extalr_node = "extalr", + .mod_clk_base = MOD_CLK_BASE, + .clk_extal_id = CLK_EXTAL, + .clk_extalr_id = CLK_EXTALR, + .get_pll_config = r8a774b1_get_pll_config, +}; + +static const struct udevice_id r8a774b1_clk_ids[] = { + { + .compatible = "renesas,r8a774b1-cpg-mssr", + .data = (ulong)&r8a774b1_cpg_mssr_info, + }, + { } +}; + +U_BOOT_DRIVER(clk_r8a774b1) = { + .name = "clk_r8a774b1", + .id = UCLASS_CLK, + .of_match = r8a774b1_clk_ids, + .priv_auto_alloc_size = sizeof(struct gen3_clk_priv), + .ops = &gen3_clk_ops, + .probe = gen3_clk_probe, + .remove = gen3_clk_remove, +}; diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c new file mode 100644 index 00000000000..c9f0f7221d7 --- /dev/null +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774c0 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2020 Renesas Electronics Corp. + * + * Based on r8a77990-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm.h> +#include <linux/bitops.h> + +#include <dt-bindings/clock/r8a774c0-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL3, + CLK_PLL0D4, + CLK_PLL0D6, + CLK_PLL0D8, + CLK_PLL0D20, + CLK_PLL0D24, + CLK_PLL1D2, + CLK_PE, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RINT, + CLK_OCO, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774c0_core_clks[] = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + + DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), + DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), + DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1), + DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), + DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), + DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), + DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1), + DEF_FIXED(".pe", CLK_PE, CLK_PLL0D20, 1, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + + DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), + + /* Core Clock Outputs */ + DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1), + DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1), + DEF_GEN3_Z("z2", R8A774C0_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8), + DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1), + DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1), + DEF_FIXED("zx", R8A774C0_CLK_ZX, CLK_PLL1, 3, 1), + DEF_FIXED("s0d1", R8A774C0_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d3", R8A774C0_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d6", R8A774C0_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d12", R8A774C0_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s0d24", R8A774C0_CLK_S0D24, CLK_S0, 24, 1), + DEF_FIXED("s1d1", R8A774C0_CLK_S1D1, CLK_S1, 1, 1), + DEF_FIXED("s1d2", R8A774C0_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774C0_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774C0_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774C0_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774C0_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774C0_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, CLK_SDSRC, 0x0074), + DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, CLK_SDSRC, 0x0078), + DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, CLK_SDSRC, 0x026c), + + DEF_FIXED("cl", R8A774C0_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("cp", R8A774C0_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A774C0_CLK_CPEX, CLK_EXTAL, 4, 1), + + DEF_DIV6_RO("osc", R8A774C0_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), + + DEF_GEN3_PE("s0d6c", R8A774C0_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2), + DEF_GEN3_PE("s3d1c", R8A774C0_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1), + DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), + DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), + + 
DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244), + DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), + DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), + + DEF_GEN3_RCKSEL("r", R8A774C0_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4), +}; + +static const struct mssr_mod_clk r8a774c0_mod_clks[] = { + DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C), + DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C), + DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C), + DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C), + DEF_MOD("tmu0", 125, R8A774C0_CLK_CP), + DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), + DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), + DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), + DEF_MOD("scif1", 206, R8A774C0_CLK_S3D4C), + DEF_MOD("scif0", 207, R8A774C0_CLK_S3D4C), + DEF_MOD("msiof3", 208, R8A774C0_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774C0_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774C0_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774C0_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774C0_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774C0_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A774C0_CLK_S3D1), + + DEF_MOD("cmt3", 300, R8A774C0_CLK_R), + DEF_MOD("cmt2", 301, R8A774C0_CLK_R), + DEF_MOD("cmt1", 302, R8A774C0_CLK_R), + DEF_MOD("cmt0", 303, R8A774C0_CLK_R), + DEF_MOD("scif2", 310, R8A774C0_CLK_S3D4C), + DEF_MOD("sdif3", 311, R8A774C0_CLK_SD3), + DEF_MOD("sdif1", 313, R8A774C0_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774C0_CLK_SD0), + DEF_MOD("pcie0", 319, R8A774C0_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774C0_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, R8A774C0_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774C0_CLK_S3D1), + + DEF_MOD("rwdt", 402, R8A774C0_CLK_R), + DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3), + + DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2), + DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif1", 519, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif0", 520, R8A774C0_CLK_S3D1C), + DEF_MOD("thermal", 522, R8A774C0_CLK_CP), + DEF_MOD("pwm", 523, R8A774C0_CLK_S3D4C), + + DEF_MOD("fcpvd1", 602, R8A774C0_CLK_S1D2), + DEF_MOD("fcpvd0", 603, R8A774C0_CLK_S1D2), + DEF_MOD("fcpvb0", 607, R8A774C0_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774C0_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774C0_CLK_S0D1), + DEF_MOD("fcpcs", 619, R8A774C0_CLK_S0D1), + DEF_MOD("vspd1", 622, R8A774C0_CLK_S1D2), + DEF_MOD("vspd0", 623, R8A774C0_CLK_S1D2), + DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1), + + DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D2), + DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), + DEF_MOD("du1", 723, R8A774C0_CLK_S1D1), + DEF_MOD("du0", 724, R8A774C0_CLK_S1D1), + DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), + + DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), + DEF_MOD("vin4", 807, R8A774C0_CLK_S1D2), + DEF_MOD("etheravb", 812, R8A774C0_CLK_S3D2), + + DEF_MOD("gpio6", 906, R8A774C0_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774C0_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774C0_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774C0_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), + DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), + DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2), + DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP), + DEF_MOD("i2c4", 927, 
R8A774C0_CLK_S3D2), + DEF_MOD("i2c3", 928, R8A774C0_CLK_S3D2), + DEF_MOD("i2c2", 929, R8A774C0_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774C0_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774C0_CLK_S3D2), + + DEF_MOD("i2c7", 1003, R8A774C0_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774C0_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A774C0_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +/* + * CPG Clock Data + */ + +/* + * MD19 EXTAL (MHz) PLL0 PLL1 PLL3 + *-------------------------------------------------------------------- + * 0 48 x 1 x100/1 x100/3 x100/3 + * 1 48 x 1 x100/1 x100/3 x58/3 + */ +#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] = { + /* EXTAL div PLL1 mult/div PLL3 mult/div */ + { 1, 100, 3, 100, 3, }, + { 1, 100, 3, 58, 3, }, +}; + +static const struct mstp_stop_table r8a774c0_mstp_table[] = { + { 0x00200000, 0x0, 0x00200000, 0 }, + { 0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0 }, + { 0x340E2FDC, 0x2040, 0x340E2FDC, 0 }, + { 0xFFFFFFDF, 0x400, 0xFFFFFFDF, 0 }, + { 0x80000184, 0x180, 0x80000184, 0 }, + { 0xC3FFFFFF, 0x0, 0xC3FFFFFF, 0 }, + { 0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0 }, + { 0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0 }, + { 0x01F1FFF7, 0x0, 0x01F1FFF7, 0 }, + { 0xFFFFFFFE, 0x0, 0xFFFFFFFE, 0 }, + { 0xFFFEFFE0, 0x0, 0xFFFEFFE0, 0 }, + { 0x000000B7, 0x0, 0x000000B7, 0 }, +}; + +static const void *r8a774c0_get_pll_config(const u32 cpg_mode) +{ + return &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; +} + +const struct cpg_mssr_info r8a774c0_cpg_mssr_info = { + .core_clk = r8a774c0_core_clks, + .core_clk_size = ARRAY_SIZE(r8a774c0_core_clks), + .mod_clk = r8a774c0_mod_clks, + .mod_clk_size = ARRAY_SIZE(r8a774c0_mod_clks), + .mstp_table = r8a774c0_mstp_table, + .mstp_table_size = ARRAY_SIZE(r8a774c0_mstp_table), + .reset_node = "renesas,r8a774c0-rst", + .mod_clk_base = MOD_CLK_BASE, + .clk_extal_id = CLK_EXTAL, + .clk_extalr_id = ~0, + .get_pll_config = r8a774c0_get_pll_config, +}; + +static const struct udevice_id r8a774c0_clk_ids[] = { + { + .compatible = "renesas,r8a774c0-cpg-mssr", + .data = (ulong)&r8a774c0_cpg_mssr_info + }, + { } +}; + +U_BOOT_DRIVER(clk_r8a774c0) = { + .name = "clk_r8a774c0", + .id = UCLASS_CLK, + .of_match = r8a774c0_clk_ids, + .priv_auto_alloc_size = sizeof(struct gen3_clk_priv), + .ops = &gen3_clk_ops, + .probe = gen3_clk_probe, + .remove = gen3_clk_remove, +}; diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c new file mode 
100644 index 00000000000..6cce007aa13 --- /dev/null +++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774e1 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2020 Renesas Electronics Corp. + * + * Based on r8a7795-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm.h> +#include <linux/bitops.h> + +#include <dt-bindings/clock/r8a774e1-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774E1_CLK_CANFD, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL2, + CLK_PLL3, + CLK_PLL4, + CLK_PLL1_DIV2, + CLK_PLL1_DIV4, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RPCSRC, + CLK_RINT, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774e1_core_clks[] = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), + + DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC, + CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, + R8A774E1_CLK_RPC), + + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + + /* Core Clock Outputs */ + DEF_GEN3_Z("z", R8A774E1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + DEF_GEN3_Z("z2", R8A774E1_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0), + DEF_FIXED("ztr", R8A774E1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED("ztrd2", R8A774E1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), + DEF_FIXED("zt", R8A774E1_CLK_ZT, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED("zx", R8A774E1_CLK_ZX, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED("s0d1", R8A774E1_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A774E1_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A774E1_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A774E1_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A774E1_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A774E1_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A774E1_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s1d2", R8A774E1_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774E1_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774E1_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774E1_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774E1_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774E1_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774E1_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774E1_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", 
R8A774E1_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, CLK_SDSRC, 0x26c), + + DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1), + DEF_FIXED("cp", R8A774E1_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A774E1_CLK_CPEX, CLK_EXTAL, 2, 1), + + DEF_DIV6P1("canfd", R8A774E1_CLK_CANFD, CLK_PLL1_DIV4, 0x244), + DEF_DIV6P1("csi0", R8A774E1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A774E1_CLK_MSO, CLK_PLL1_DIV4, 0x014), + DEF_DIV6P1("hdmi", R8A774E1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), + + DEF_GEN3_OSC("osc", R8A774E1_CLK_OSC, CLK_EXTAL, 8), + + DEF_BASE("r", R8A774E1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), +}; + +static const struct mssr_mod_clk r8a774e1_mod_clks[] = { + DEF_MOD("fdp1-1", 118, R8A774E1_CLK_S0D1), + DEF_MOD("fdp1-0", 119, R8A774E1_CLK_S0D1), + DEF_MOD("tmu4", 121, R8A774E1_CLK_S0D6), + DEF_MOD("tmu3", 122, R8A774E1_CLK_S3D2), + DEF_MOD("tmu2", 123, R8A774E1_CLK_S3D2), + DEF_MOD("tmu1", 124, R8A774E1_CLK_S3D2), + DEF_MOD("tmu0", 125, R8A774E1_CLK_CP), + DEF_MOD("vcplf", 130, R8A774E1_CLK_S2D1), + DEF_MOD("vdpb", 131, R8A774E1_CLK_S2D1), + DEF_MOD("scif5", 202, R8A774E1_CLK_S3D4), + DEF_MOD("scif4", 203, R8A774E1_CLK_S3D4), + DEF_MOD("scif3", 204, R8A774E1_CLK_S3D4), + DEF_MOD("scif1", 206, R8A774E1_CLK_S3D4), + DEF_MOD("scif0", 207, R8A774E1_CLK_S3D4), + DEF_MOD("msiof3", 208, R8A774E1_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774E1_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774E1_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774E1_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774E1_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774E1_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A774E1_CLK_S0D3), + DEF_MOD("cmt3", 300, R8A774E1_CLK_R), + DEF_MOD("cmt2", 301, R8A774E1_CLK_R), + DEF_MOD("cmt1", 302, R8A774E1_CLK_R), + DEF_MOD("cmt0", 303, R8A774E1_CLK_R), + DEF_MOD("tpu0", 304, R8A774E1_CLK_S3D4), + DEF_MOD("scif2", 310, R8A774E1_CLK_S3D4), + DEF_MOD("sdif3", 311, R8A774E1_CLK_SD3), + DEF_MOD("sdif2", 312, R8A774E1_CLK_SD2), + DEF_MOD("sdif1", 313, R8A774E1_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774E1_CLK_SD0), + DEF_MOD("pcie1", 318, R8A774E1_CLK_S3D1), + DEF_MOD("pcie0", 319, R8A774E1_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774E1_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, R8A774E1_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774E1_CLK_S3D1), + DEF_MOD("rwdt", 402, R8A774E1_CLK_R), + DEF_MOD("intc-ex", 407, R8A774E1_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774E1_CLK_S0D3), + DEF_MOD("audmac1", 501, R8A774E1_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A774E1_CLK_S1D2), + DEF_MOD("hscif4", 516, R8A774E1_CLK_S3D1), + DEF_MOD("hscif3", 517, R8A774E1_CLK_S3D1), + DEF_MOD("hscif2", 518, R8A774E1_CLK_S3D1), + DEF_MOD("hscif1", 519, R8A774E1_CLK_S3D1), + DEF_MOD("hscif0", 520, R8A774E1_CLK_S3D1), + DEF_MOD("thermal", 522, R8A774E1_CLK_CP), + DEF_MOD("pwm", 523, R8A774E1_CLK_S0D12), + DEF_MOD("fcpvd1", 602, R8A774E1_CLK_S0D2), + DEF_MOD("fcpvd0", 603, R8A774E1_CLK_S0D2), + DEF_MOD("fcpvb1", 606, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvb0", 607, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvi1", 610, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774E1_CLK_S0D1), + DEF_MOD("fcpf1", 614, R8A774E1_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774E1_CLK_S0D1), + DEF_MOD("fcpcs", 619, R8A774E1_CLK_S0D1), + DEF_MOD("vspd1", 622, R8A774E1_CLK_S0D2), + DEF_MOD("vspd0", 623, R8A774E1_CLK_S0D2), + DEF_MOD("vspbc", 624, R8A774E1_CLK_S0D1), + DEF_MOD("vspbd", 626, R8A774E1_CLK_S0D1), + DEF_MOD("vspi1", 630, R8A774E1_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774E1_CLK_S0D1), + 
DEF_MOD("ehci1", 702, R8A774E1_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A774E1_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774E1_CLK_S3D2), + DEF_MOD("csi20", 714, R8A774E1_CLK_CSI0), + DEF_MOD("csi40", 716, R8A774E1_CLK_CSI0), + DEF_MOD("du3", 721, R8A774E1_CLK_S2D1), + DEF_MOD("du1", 723, R8A774E1_CLK_S2D1), + DEF_MOD("du0", 724, R8A774E1_CLK_S2D1), + DEF_MOD("lvds", 727, R8A774E1_CLK_S0D4), + DEF_MOD("hdmi0", 729, R8A774E1_CLK_HDMI), + DEF_MOD("vin7", 804, R8A774E1_CLK_S0D2), + DEF_MOD("vin6", 805, R8A774E1_CLK_S0D2), + DEF_MOD("vin5", 806, R8A774E1_CLK_S0D2), + DEF_MOD("vin4", 807, R8A774E1_CLK_S0D2), + DEF_MOD("vin3", 808, R8A774E1_CLK_S0D2), + DEF_MOD("vin2", 809, R8A774E1_CLK_S0D2), + DEF_MOD("vin1", 810, R8A774E1_CLK_S0D2), + DEF_MOD("vin0", 811, R8A774E1_CLK_S0D2), + DEF_MOD("etheravb", 812, R8A774E1_CLK_S0D6), + DEF_MOD("sata0", 815, R8A774E1_CLK_S3D2), + DEF_MOD("gpio7", 905, R8A774E1_CLK_S3D4), + DEF_MOD("gpio6", 906, R8A774E1_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774E1_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774E1_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774E1_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A774E1_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774E1_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A774E1_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A774E1_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A774E1_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774E1_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A774E1_CLK_RPCD2), + DEF_MOD("i2c6", 918, R8A774E1_CLK_S0D6), + DEF_MOD("i2c5", 919, R8A774E1_CLK_S0D6), + DEF_MOD("adg", 922, R8A774E1_CLK_S0D1), + DEF_MOD("i2c-dvfs", 926, R8A774E1_CLK_CP), + DEF_MOD("i2c4", 927, R8A774E1_CLK_S0D6), + DEF_MOD("i2c3", 928, R8A774E1_CLK_S0D6), + DEF_MOD("i2c2", 929, R8A774E1_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774E1_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774E1_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774E1_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A774E1_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC + * 14 13 19 17 (MHz) + *------------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16 + * 0 0 1 0 Prohibited setting + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19 + * 0 1 1 0 Prohibited setting + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x96 
x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24 + * 1 0 1 0 Prohibited setting + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32 + * 1 1 1 0 Prohibited setting + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ + (((md) & BIT(13)) >> 11) | \ + (((md) & BIT(19)) >> 18) | \ + (((md) & BIT(17)) >> 17)) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] = { + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, +}; + +/* RMSTPCR[0-11] is not present on RZ/G2H */ +static const struct mstp_stop_table r8a774e1_mstp_table[] = { + { 0x00640800, 0x0, 0x0, 0 }, + { 0xF3EE9390, 0x0, 0x0, 0 }, + { 0x340FAFDC, 0x2040, 0x0, 0 }, + { 0xD80C7CDF, 0x400, 0x0, 0 }, + { 0x80000184, 0x180, 0x0, 0 }, + { 0x40BFFF46, 0x0, 0x0, 0 }, + { 0xE5FBEECF, 0x0, 0x0, 0 }, + { 0x39FFFF0E, 0x0, 0x0, 0 }, + { 0x01F19FF4, 0x0, 0x0, 0 }, + { 0xFFDFFFFF, 0x0, 0x0, 0 }, + { 0xFFFEFFE0, 0x0, 0x0, 0 }, + { 0x00000000, 0x0, 0x0, 0 }, +}; + +static const void *r8a774e1_get_pll_config(const u32 cpg_mode) +{ + return &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; +} + +static const struct cpg_mssr_info r8a774e1_cpg_mssr_info = { + .core_clk = r8a774e1_core_clks, + .core_clk_size = ARRAY_SIZE(r8a774e1_core_clks), + .mod_clk = r8a774e1_mod_clks, + .mod_clk_size = ARRAY_SIZE(r8a774e1_mod_clks), + .mstp_table = r8a774e1_mstp_table, + .mstp_table_size = ARRAY_SIZE(r8a774e1_mstp_table), + .reset_node = "renesas,r8a774e1-rst", + .extalr_node = "extalr", + .mod_clk_base = MOD_CLK_BASE, + .clk_extal_id = CLK_EXTAL, + .clk_extalr_id = CLK_EXTALR, + .get_pll_config = r8a774e1_get_pll_config, +}; + +static const struct udevice_id r8a774e1_clk_ids[] = { + { + .compatible = "renesas,r8a774e1-cpg-mssr", + .data = (ulong)&r8a774e1_cpg_mssr_info + }, + { } +}; + +U_BOOT_DRIVER(clk_r8a774e1) = { + .name = "clk_r8a774e1", + .id = UCLASS_CLK, + .of_match = r8a774e1_clk_ids, + .priv_auto_alloc_size = sizeof(struct gen3_clk_priv), + .ops = &gen3_clk_ops, + .probe = gen3_clk_probe, + .remove = gen3_clk_remove, +}; diff --git a/drivers/clk/rockchip/clk_rk3399.c b/drivers/clk/rockchip/clk_rk3399.c index 44fb8ca9569..478d76d428c 100644 --- a/drivers/clk/rockchip/clk_rk3399.c +++ b/drivers/clk/rockchip/clk_rk3399.c @@ -1612,7 +1612,7 @@ static int rk3399_pmuclk_ofdata_to_platdata(struct udevice *dev) static int rk3399_pmuclk_bind(struct udevice *dev) { -#if CONFIG_IS_ENABLED(CONFIG_RESET_ROCKCHIP) +#if CONFIG_IS_ENABLED(RESET_ROCKCHIP) int ret; ret = offsetof(struct rk3399_pmucru, pmucru_softrst_con[0]); diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index c5148e9a37e..1b4d81d4f06 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -29,23 +29,23 @@ */ #include <common.h> -#include <asm/io.h> -#include <asm/arch/reset.h> #include <clk-uclass.h> #include <clk.h> #include <div64.h> #include <dm.h> -#include 
<errno.h> -#include <reset-uclass.h> #include <dm/device.h> +#include <dm/device_compat.h> #include <dm/uclass.h> +#include <dt-bindings/clock/sifive-fu540-prci.h> +#include <dt-bindings/reset/sifive-fu540-prci.h> +#include <errno.h> +#include <reset-uclass.h> +#include <asm/io.h> +#include <asm/arch/reset.h> #include <linux/delay.h> #include <linux/err.h> - #include <linux/math64.h> #include <linux/clk/analogbits-wrpll-cln28hpc.h> -#include <dt-bindings/clock/sifive-fu540-prci.h> -#include <dt-bindings/reset/sifive-fu540-prci.h> /* * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects: diff --git a/drivers/core/Kconfig b/drivers/core/Kconfig index 1ca5d66141b..ffae6f9795f 100644 --- a/drivers/core/Kconfig +++ b/drivers/core/Kconfig @@ -40,10 +40,24 @@ config DM_WARN depends on DM default y help + Enable this to see warnings related to driver model. + + Warnings may help with debugging, such as when expected devices do + not bind correctly. If the option is disabled, dm_warn() is compiled + out - it will do nothing when called. + +config SPL_DM_WARN + bool "Enable warnings in driver model with SPL" + depends on SPL_DM + help + Enable this to see warnings related to driver model in SPL. + The dm_warn() function can use up quite a bit of space for its strings. By default this is disabled for SPL builds to save space. - This will cause dm_warn() to be compiled out - it will do nothing - when called. + + Warnings may help with debugging, such as when expected devices do + not bind correctly. If the option is disabled, dm_warn() is compiled + out - it will do nothing when called. config DM_DEBUG bool "Enable debug messages in driver model core" @@ -286,4 +300,15 @@ config INTEL_ACPIGEN information such as P states and T stages. Also included is a way to create a GNVS table and set it up. +config BOUNCE_BUFFER + bool "Include bounce buffer API" + help + Some peripherals support DMA from a subset of physically + addressable memory only. To support such peripherals, the + bounce buffer API uses a temporary buffer: it copies data + to/from DMA regions while managing cache operations. + + A second possible use of bounce buffers is their ability to + provide aligned buffers for DMA operations. 
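For context: a driver consumes this API by wrapping the transfer in a start/stop pair. A minimal sketch, assuming U-Boot's existing <bounce_buf.h> helpers (bounce_buffer_start()/bounce_buffer_stop()); my_dma_read() and the controller-programming step are hypothetical:

```
#include <bounce_buf.h>

/* GEN_BB_WRITE means the hardware writes to memory (a device read),
 * so bounce_buffer_stop() copies the data back to the user buffer.
 */
static int my_dma_read(void *dst, size_t len)
{
	struct bounce_buffer bbstate;
	int ret;

	ret = bounce_buffer_start(&bbstate, dst, len, GEN_BB_WRITE);
	if (ret)
		return ret;

	/* ... program the controller with bbstate.bounce_buffer,
	 * which is allocated cache-line aligned ...
	 */

	return bounce_buffer_stop(&bbstate);
}
```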
+ endmenu diff --git a/drivers/core/Makefile b/drivers/core/Makefile index 10f4bece335..5edd4e41357 100644 --- a/drivers/core/Makefile +++ b/drivers/core/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o obj-$(CONFIG_DM) += dump.o obj-$(CONFIG_$(SPL_TPL_)REGMAP) += regmap.o obj-$(CONFIG_$(SPL_TPL_)SYSCON) += syscon-uclass.o -obj-$(CONFIG_OF_LIVE) += of_access.o of_addr.o +obj-$(CONFIG_$(SPL_)OF_LIVE) += of_access.o of_addr.o ifndef CONFIG_DM_DEV_READ_INLINE obj-$(CONFIG_OF_CONTROL) += read.o endif diff --git a/drivers/core/device.c b/drivers/core/device.c index e90d70101c2..4b3dcb3b379 100644 --- a/drivers/core/device.c +++ b/drivers/core/device.c @@ -249,7 +249,7 @@ int device_bind_ofnode(struct udevice *parent, const struct driver *drv, } int device_bind_by_name(struct udevice *parent, bool pre_reloc_only, - struct driver_info *info, struct udevice **devp) + const struct driver_info *info, struct udevice **devp) { struct driver *drv; uint platdata_size = 0; @@ -269,9 +269,6 @@ int device_bind_by_name(struct udevice *parent, bool pre_reloc_only, platdata_size, devp); if (ret) return ret; -#if CONFIG_IS_ENABLED(OF_PLATDATA) - info->dev = *devp; -#endif return ret; } @@ -764,9 +761,25 @@ int device_get_global_by_ofnode(ofnode ofnode, struct udevice **devp) int device_get_by_driver_info(const struct driver_info *info, struct udevice **devp) { + struct driver_info *info_base = + ll_entry_start(struct driver_info, driver_info); + int idx = info - info_base; + struct driver_rt *drt = gd_dm_driver_rt() + idx; struct udevice *dev; - dev = info->dev; + dev = drt->dev; + *devp = NULL; + + return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp); +} + +int device_get_by_driver_info_idx(uint idx, struct udevice **devp) +{ + struct driver_rt *drt = gd_dm_driver_rt() + idx; + struct udevice *dev; + + dev = drt->dev; + *devp = NULL; return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp); } diff --git a/drivers/core/lists.c b/drivers/core/lists.c index 5beba9181cc..b23ee3030e5 100644 --- a/drivers/core/lists.c +++ b/drivers/core/lists.c @@ -51,25 +51,81 @@ struct uclass_driver *lists_uclass_lookup(enum uclass_id id) return NULL; } -int lists_bind_drivers(struct udevice *parent, bool pre_reloc_only) +static int bind_drivers_pass(struct udevice *parent, bool pre_reloc_only) { struct driver_info *info = ll_entry_start(struct driver_info, driver_info); const int n_ents = ll_entry_count(struct driver_info, driver_info); - struct driver_info *entry; - struct udevice *dev; + bool missing_parent = false; int result = 0; - int ret; + uint idx; + + /* + * Do one iteration through the driver_info records. For of-platdata, + * bind only devices whose parent is already bound. If we find any + * device we can't bind, set missing_parent to true, which will cause + * this function to be called again. 
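The comment above describes a small fixed-point iteration: each pass binds every device whose parent is already bound, and the whole thing repeats while anything is still waiting on a parent. Stripped of the driver-model specifics, the shape is sketched below; struct entry, bind_all() and the bound flag are illustrative names, not U-Boot API:

```
#include <errno.h>
#include <linux/types.h>

struct entry {
	int parent;	/* index of parent entry, or -1 for root */
	bool bound;
};

static int bind_all(struct entry *e, int count)
{
	int pass;

	/* Mirror lists_bind_drivers(): cap the depth at 10 levels */
	for (pass = 0; pass < 10; pass++) {
		bool missing = false;
		int i;

		for (i = 0; i < count; i++) {
			if (e[i].bound)
				continue;
			/* Parent not bound yet: retry on the next pass */
			if (e[i].parent >= 0 && !e[e[i].parent].bound) {
				missing = true;
				continue;
			}
			e[i].bound = true;
		}
		if (!missing)
			return 0;
	}

	return -EAGAIN;	/* cycle, or a tree deeper than 10 levels */
}
```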
+ */ + for (idx = 0; idx < n_ents; idx++) { + struct udevice *par = parent; + const struct driver_info *entry = info + idx; + struct driver_rt *drt = gd_dm_driver_rt() + idx; + struct udevice *dev; + int ret; - for (entry = info; entry != info + n_ents; entry++) { - ret = device_bind_by_name(parent, pre_reloc_only, entry, &dev); - if (ret && ret != -EPERM) { + if (CONFIG_IS_ENABLED(OF_PLATDATA)) { + int parent_idx = driver_info_parent_id(entry); + + if (drt->dev) + continue; + + if (CONFIG_IS_ENABLED(OF_PLATDATA_PARENT) && + parent_idx != -1) { + struct driver_rt *parent_drt; + + parent_drt = gd_dm_driver_rt() + parent_idx; + if (!parent_drt->dev) { + missing_parent = true; + continue; + } + + par = parent_drt->dev; + } + } + ret = device_bind_by_name(par, pre_reloc_only, entry, &dev); + if (!ret) { + if (CONFIG_IS_ENABLED(OF_PLATDATA)) + drt->dev = dev; + } else if (ret != -EPERM) { dm_warn("No match for driver '%s'\n", entry->name); if (!result || ret != -ENOENT) result = ret; } } + return result ? result : missing_parent ? -EAGAIN : 0; +} + +int lists_bind_drivers(struct udevice *parent, bool pre_reloc_only) +{ + int result = 0; + int pass; + + /* + * 10 passes is 10 levels deep in the devicetree, which is plenty. If + * OF_PLATDATA_PARENT is not enabled, then bind_drivers_pass() will + * always succeed on the first pass. + */ + for (pass = 0; pass < 10; pass++) { + int ret; + + ret = bind_drivers_pass(parent, pre_reloc_only); + if (!ret) + break; + if (ret != -EAGAIN && !result) + result = ret; + } + return result; } diff --git a/drivers/core/ofnode.c b/drivers/core/ofnode.c index 7d1b89514c7..a68076bf351 100644 --- a/drivers/core/ofnode.c +++ b/drivers/core/ofnode.c @@ -476,6 +476,28 @@ ofnode ofnode_get_chosen_node(const char *name) return ofnode_path(prop); } +const void *ofnode_read_aliases_prop(const char *propname, int *sizep) +{ + ofnode node; + + node = ofnode_path("/aliases"); + + return ofnode_read_prop(node, propname, sizep); +} + +ofnode ofnode_get_aliases_node(const char *name) +{ + const char *prop; + + prop = ofnode_read_aliases_prop(name, NULL); + if (!prop) + return ofnode_null(); + + debug("%s: node_path: %s\n", __func__, prop); + + return ofnode_path(prop); +} + int ofnode_get_child_count(ofnode parent) { ofnode child; diff --git a/drivers/core/root.c b/drivers/core/root.c index 0726be6b795..5f10d7a39c7 100644 --- a/drivers/core/root.c +++ b/drivers/core/root.c @@ -50,7 +50,6 @@ void dm_fixup_for_gd_move(struct global_data *new_gd) } } -#if defined(CONFIG_NEEDS_MANUAL_RELOC) void fix_drivers(void) { struct driver *drv = @@ -61,7 +60,7 @@ void fix_drivers(void) for (entry = drv; entry != drv + n_ents; entry++) { if (entry->of_match) entry->of_match = (const struct udevice_id *) - ((u32)entry->of_match + gd->reloc_off); + ((ulong)entry->of_match + gd->reloc_off); if (entry->bind) entry->bind += gd->reloc_off; if (entry->probe) @@ -129,8 +128,6 @@ void fix_devices(void) } } -#endif - int dm_init(bool of_live) { int ret; @@ -141,21 +138,19 @@ int dm_init(bool of_live) } INIT_LIST_HEAD(&DM_UCLASS_ROOT_NON_CONST); -#if defined(CONFIG_NEEDS_MANUAL_RELOC) - fix_drivers(); - fix_uclass(); - fix_devices(); -#endif + if (IS_ENABLED(CONFIG_NEEDS_MANUAL_RELOC)) { + fix_drivers(); + fix_uclass(); + fix_devices(); + } ret = device_bind_by_name(NULL, false, &root_info, &DM_ROOT_NON_CONST); if (ret) return ret; #if CONFIG_IS_ENABLED(OF_CONTROL) -# if CONFIG_IS_ENABLED(OF_LIVE) - if (of_live) - DM_ROOT_NON_CONST->node = np_to_ofnode(gd->of_root); + if (CONFIG_IS_ENABLED(OF_LIVE) && 
of_live) + DM_ROOT_NON_CONST->node = np_to_ofnode(gd_of_root()); else -#endif DM_ROOT_NON_CONST->node = offset_to_ofnode(0); #endif ret = device_probe(DM_ROOT_NON_CONST); @@ -187,6 +182,17 @@ int dm_scan_platdata(bool pre_reloc_only) { int ret; + if (CONFIG_IS_ENABLED(OF_PLATDATA)) { + struct driver_rt *dyn; + int n_ents; + + n_ents = ll_entry_count(struct driver_info, driver_info); + dyn = calloc(n_ents, sizeof(struct driver_rt)); + if (!dyn) + return -ENOMEM; + gd_set_dm_driver_rt(dyn); + } + ret = lists_bind_drivers(DM_ROOT_NON_CONST, pre_reloc_only); if (ret == -ENOENT) { dm_warn("Some drivers were not found\n"); @@ -196,7 +202,7 @@ int dm_scan_platdata(bool pre_reloc_only) return ret; } -#if CONFIG_IS_ENABLED(OF_LIVE) +#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA) static int dm_scan_fdt_live(struct udevice *parent, const struct device_node *node_parent, bool pre_reloc_only) @@ -223,9 +229,7 @@ static int dm_scan_fdt_live(struct udevice *parent, return ret; } -#endif /* CONFIG_IS_ENABLED(OF_LIVE) */ -#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA) /** * dm_scan_fdt_node() - Scan the device tree and bind drivers for a node * @@ -272,24 +276,20 @@ int dm_scan_fdt_dev(struct udevice *dev) if (!dev_of_valid(dev)) return 0; -#if CONFIG_IS_ENABLED(OF_LIVE) if (of_live_active()) return dm_scan_fdt_live(dev, dev_np(dev), gd->flags & GD_FLG_RELOC ? false : true); - else -#endif + return dm_scan_fdt_node(dev, gd->fdt_blob, dev_of_offset(dev), gd->flags & GD_FLG_RELOC ? false : true); } int dm_scan_fdt(const void *blob, bool pre_reloc_only) { -#if CONFIG_IS_ENABLED(OF_LIVE) if (of_live_active()) - return dm_scan_fdt_live(gd->dm_root, gd->of_root, + return dm_scan_fdt_live(gd->dm_root, gd_of_root(), pre_reloc_only); - else -#endif + return dm_scan_fdt_node(gd->dm_root, blob, 0, pre_reloc_only); } @@ -302,10 +302,9 @@ static int dm_scan_fdt_ofnode_path(const void *blob, const char *path, if (!ofnode_valid(node)) return 0; -#if CONFIG_IS_ENABLED(OF_LIVE) if (of_live_active()) return dm_scan_fdt_live(gd->dm_root, node.np, pre_reloc_only); -#endif + return dm_scan_fdt_node(gd->dm_root, blob, node.of_offset, pre_reloc_only); } @@ -348,11 +347,10 @@ int dm_init_and_scan(bool pre_reloc_only) { int ret; -#if CONFIG_IS_ENABLED(OF_PLATDATA) - dm_populate_phandle_data(); -#endif + if (CONFIG_IS_ENABLED(OF_PLATDATA)) + dm_populate_phandle_data(); - ret = dm_init(IS_ENABLED(CONFIG_OF_LIVE)); + ret = dm_init(CONFIG_IS_ENABLED(OF_LIVE)); if (ret) { debug("dm_init() failed: %d\n", ret); return ret; diff --git a/drivers/core/syscon-uclass.c b/drivers/core/syscon-uclass.c index 5be1d527a0a..567d0a4b50a 100644 --- a/drivers/core/syscon-uclass.c +++ b/drivers/core/syscon-uclass.c @@ -4,6 +4,8 @@ * Written by Simon Glass <sjg@chromium.org> */ +#define LOG_CATEGORY UCLASS_SYSCON + #include <common.h> #include <log.h> #include <syscon.h> @@ -140,7 +142,7 @@ int syscon_get_by_driver_data(ulong driver_data, struct udevice **devp) ret = uclass_first_device_drvdata(UCLASS_SYSCON, driver_data, devp); if (ret) - return log_msg_ret("find", ret); + return ret; return 0; } diff --git a/drivers/core/util.c b/drivers/core/util.c index 25b0d76f430..91e93b0cf14 100644 --- a/drivers/core/util.c +++ b/drivers/core/util.c @@ -11,7 +11,7 @@ #include <linux/libfdt.h> #include <vsprintf.h> -#ifdef CONFIG_DM_WARN +#if CONFIG_IS_ENABLED(DM_WARN) void dm_warn(const char *fmt, ...) 
{ va_list args; diff --git a/drivers/ddr/fsl/main.c b/drivers/ddr/fsl/main.c index 84139b85c33..c02badd4a88 100644 --- a/drivers/ddr/fsl/main.c +++ b/drivers/ddr/fsl/main.c @@ -705,7 +705,7 @@ phys_size_t __fsl_ddr_sdram(fsl_ddr_info_t *pinfo) /* Compute it once normally. */ #ifdef CONFIG_FSL_DDR_INTERACTIVE - if (tstc() && (getc() == 'd')) { /* we got a key press of 'd' */ + if (tstc() && (getchar() == 'd')) { /* we got a key press of 'd' */ total_memory = fsl_ddr_interactive(pinfo, 0); } else if (fsl_ddr_interactive_env_var_exists()) { total_memory = fsl_ddr_interactive(pinfo, 1); diff --git a/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h b/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h index 5bf32399211..ac9250f74ed 100644 --- a/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h +++ b/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h @@ -107,7 +107,7 @@ #define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */ #define MV_FULL (0x14) /* Item is full (Queue or table etc...) */ #define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */ -#define MV_INIT_ERROR (0x16) /* Error occured while INIT process */ +#define MV_INIT_ERROR (0x16) /* Error occurred while INIT process */ #define MV_HW_ERROR (0x17) /* Hardware error */ #define MV_TX_ERROR (0x18) /* Transmit operation not succeeded */ #define MV_RX_ERROR (0x19) /* Recieve operation not succeeded */ diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 57d9fbfabbf..94216045ccc 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1134,7 +1134,9 @@ err_free_res: static void udma_free_chan_resources(struct udma_chan *uc) { - /* Some configuration to UDMA-P channel: disable, reset, whatever */ + /* Hard reset UDMA channel */ + udma_stop_hard(uc); + udma_reset_counters(uc); /* Release PSI-L pairing */ udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread); diff --git a/drivers/firmware/firmware-zynqmp.c b/drivers/firmware/firmware-zynqmp.c index 7583f24a200..d4dc856bafa 100644 --- a/drivers/firmware/firmware-zynqmp.c +++ b/drivers/firmware/firmware-zynqmp.c @@ -165,6 +165,14 @@ int __maybe_unused xilinx_pm_request(u32 api_id, u32 arg0, u32 arg1, u32 arg2, */ u32 regs[] = {api_id, arg0, arg1, arg2, arg3}; + if (api_id == PM_FPGA_LOAD) { + /* Swap addr_hi/low because of incompatibility */ + u32 temp = regs[1]; + + regs[1] = regs[2]; + regs[2] = temp; + } + ipi_req(regs, PAYLOAD_ARG_CNT, ret_payload, PAYLOAD_ARG_CNT); #else return -EPERM; diff --git a/drivers/firmware/scmi/mailbox_agent.c b/drivers/firmware/scmi/mailbox_agent.c index 7d9fb3622ed..a85cff1097e 100644 --- a/drivers/firmware/scmi/mailbox_agent.c +++ b/drivers/firmware/scmi/mailbox_agent.c @@ -5,6 +5,7 @@ #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <errno.h> #include <mailbox.h> #include <scmi_agent.h> diff --git a/drivers/firmware/scmi/scmi_agent-uclass.c b/drivers/firmware/scmi/scmi_agent-uclass.c index 77160b19995..7dc533149b6 100644 --- a/drivers/firmware/scmi/scmi_agent-uclass.c +++ b/drivers/firmware/scmi/scmi_agent-uclass.c @@ -5,6 +5,7 @@ #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <errno.h> #include <scmi_agent-uclass.h> #include <scmi_protocols.h> diff --git a/drivers/firmware/scmi/smt.c b/drivers/firmware/scmi/smt.c index ce8fe499390..d25478796aa 100644 --- a/drivers/firmware/scmi/smt.c +++ b/drivers/firmware/scmi/smt.c @@ -7,6 +7,7 @@ #include <common.h> #include <cpu_func.h> #include <dm.h> +#include <dm/device_compat.h> #include <errno.h> #include 
<scmi_agent.h> #include <asm/cache.h> diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index fe398a1d496..425b52a9266 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -31,16 +31,16 @@ config FPGA_CYCLON2 Enable FPGA driver for loading bitstream in BIT and BIN format on Altera Cyclone II device. -config FPGA_STRATIX10 - bool "Enable Altera FPGA driver for Stratix 10" - depends on TARGET_SOCFPGA_STRATIX10 +config FPGA_INTEL_SDM_MAILBOX + bool "Enable Intel FPGA Full Reconfiguration SDM Mailbox driver" + depends on TARGET_SOCFPGA_STRATIX10 || TARGET_SOCFPGA_AGILEX select FPGA_ALTERA help - Say Y here to enable the Altera Stratix 10 FPGA specific driver + Say Y here to enable the Intel FPGA Full Reconfig SDM Mailbox driver - This provides common functionality for Altera Stratix 10 devices. - Enable FPGA driver for writing bitstream into Altera Stratix10 - device. + This provides common functionality for Intel FPGA devices. + Enable FPGA driver for writing full bitstream into Intel FPGA + devices through SDM (Secure Device Manager) Mailbox. config FPGA_XILINX bool "Enable Xilinx FPGA drivers" diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 04e6480f202..83243fb1070 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -16,9 +16,9 @@ ifdef CONFIG_FPGA_ALTERA obj-y += altera.o obj-$(CONFIG_FPGA_ACEX1K) += ACEX1K.o obj-$(CONFIG_FPGA_CYCLON2) += cyclon2.o +obj-$(CONFIG_FPGA_INTEL_SDM_MAILBOX) += intel_sdm_mb.o obj-$(CONFIG_FPGA_STRATIX_II) += stratixII.o obj-$(CONFIG_FPGA_STRATIX_V) += stratixv.o -obj-$(CONFIG_FPGA_STRATIX10) += stratix10.o obj-$(CONFIG_FPGA_SOCFPGA) += socfpga.o obj-$(CONFIG_TARGET_SOCFPGA_GEN5) += socfpga_gen5.o obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += socfpga_arria10.o diff --git a/drivers/fpga/altera.c b/drivers/fpga/altera.c index bb27b3778f3..10c0475d259 100644 --- a/drivers/fpga/altera.c +++ b/drivers/fpga/altera.c @@ -40,12 +40,13 @@ static const struct altera_fpga { #if defined(CONFIG_FPGA_STRATIX_V) { Altera_StratixV, "StratixV", stratixv_load, NULL, NULL }, #endif -#if defined(CONFIG_FPGA_STRATIX10) - { Intel_FPGA_Stratix10, "Stratix10", stratix10_load, NULL, NULL }, -#endif #if defined(CONFIG_FPGA_SOCFPGA) { Altera_SoCFPGA, "SoC FPGA", socfpga_load, NULL, NULL }, #endif +#if defined(CONFIG_FPGA_INTEL_SDM_MAILBOX) + { Intel_FPGA_SDM_Mailbox, "Intel SDM Mailbox", intel_sdm_mb_load, NULL, + NULL }, +#endif }; static int altera_validate(Altera_desc *desc, const char *fn) diff --git a/drivers/fpga/stratix10.c b/drivers/fpga/intel_sdm_mb.c index da8fa315e36..9a1dc2c0c83 100644 --- a/drivers/fpga/stratix10.c +++ b/drivers/fpga/intel_sdm_mb.c @@ -6,6 +6,7 @@ #include <common.h> #include <altera.h> #include <log.h> +#include <watchdog.h> #include <asm/arch/mailbox_s10.h> #include <linux/delay.h> @@ -113,6 +114,7 @@ static int reconfig_status_polling_resp(void) puts("."); udelay(RECONFIG_STATUS_INTERVAL_DELAY_US); + WATCHDOG_RESET(); } return -ETIMEDOUT; @@ -238,6 +240,7 @@ static int send_reconfig_data(const void *rbf_data, size_t rbf_size, if (resp_err && !xfer_count) return resp_err; } + WATCHDOG_RESET(); } return 0; @@ -247,7 +250,7 @@ static int send_reconfig_data(const void *rbf_data, size_t rbf_size, * This is the interface used by FPGA driver. * Return 0 for success, non-zero for error. 
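The WATCHDOG_RESET() calls added to the two loops above follow a general rule: any polling loop that can run for seconds must pet the watchdog, otherwise boards with a hardware watchdog enabled reset in the middle of the reconfiguration. The generic shape, as a sketch (poll_until_done() and its parameters are made up for illustration; WATCHDOG_RESET() compiles to a no-op when no watchdog is configured):

```
#include <errno.h>
#include <watchdog.h>
#include <linux/delay.h>

static int poll_until_done(int (*done)(void), unsigned int tries,
			   unsigned int delay_us)
{
	while (tries--) {
		if (done())
			return 0;
		udelay(delay_us);
		WATCHDOG_RESET();
	}

	return -ETIMEDOUT;
}
```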
*/ -int stratix10_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size) +int intel_sdm_mb_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size) { int ret; u32 resp_len = 2; diff --git a/drivers/gpio/dwapb_gpio.c b/drivers/gpio/dwapb_gpio.c index e5e35181940..37916e77716 100644 --- a/drivers/gpio/dwapb_gpio.c +++ b/drivers/gpio/dwapb_gpio.c @@ -40,7 +40,7 @@ struct gpio_dwapb_platdata { const char *name; int bank; int pins; - fdt_addr_t base; + void __iomem *base; }; static int dwapb_gpio_direction_input(struct udevice *dev, unsigned pin) @@ -66,13 +66,6 @@ static int dwapb_gpio_direction_output(struct udevice *dev, unsigned pin, return 0; } -static int dwapb_gpio_get_value(struct udevice *dev, unsigned pin) -{ - struct gpio_dwapb_platdata *plat = dev_get_platdata(dev); - return !!(readl(plat->base + GPIO_EXT_PORT(plat->bank)) & (1 << pin)); -} - - static int dwapb_gpio_set_value(struct udevice *dev, unsigned pin, int val) { struct gpio_dwapb_platdata *plat = dev_get_platdata(dev); @@ -98,6 +91,18 @@ static int dwapb_gpio_get_function(struct udevice *dev, unsigned offset) return GPIOF_INPUT; } +static int dwapb_gpio_get_value(struct udevice *dev, unsigned pin) +{ + struct gpio_dwapb_platdata *plat = dev_get_platdata(dev); + u32 value; + + if (dwapb_gpio_get_function(dev, pin) == GPIOF_OUTPUT) + value = readl(plat->base + GPIO_SWPORT_DR(plat->bank)); + else + value = readl(plat->base + GPIO_EXT_PORT(plat->bank)); + return !!(value & BIT(pin)); +} + static const struct dm_gpio_ops gpio_dwapb_ops = { .direction_input = dwapb_gpio_direction_input, .direction_output = dwapb_gpio_direction_output, @@ -176,7 +181,7 @@ static int gpio_dwapb_bind(struct udevice *dev) if (!plat) return -ENOMEM; - plat->base = base; + plat->base = (void *)base; plat->bank = bank; plat->pins = ofnode_read_u32_default(node, "snps,nr-gpios", 0); @@ -186,7 +191,15 @@ static int gpio_dwapb_bind(struct udevice *dev) * Fall back to node name. This means accessing pins * via bank name won't work. 
*/ - plat->name = ofnode_get_name(node); + char name[32]; + + snprintf(name, sizeof(name), "%s_", + ofnode_get_name(node)); + plat->name = strdup(name); + if (!plat->name) { + kfree(plat); + return -ENOMEM; + } } ret = device_bind_ofnode(dev, dev->driver, plat->name, diff --git a/drivers/gpio/mpc8xxx_gpio.c b/drivers/gpio/mpc8xxx_gpio.c index 1dfd22522c7..27881a73224 100644 --- a/drivers/gpio/mpc8xxx_gpio.c +++ b/drivers/gpio/mpc8xxx_gpio.c @@ -6,12 +6,15 @@ * based on arch/powerpc/include/asm/mpc85xx_gpio.h, which is * * Copyright 2010 eXMeritus, A Boeing Company + * Copyright 2020 NXP */ #include <common.h> #include <dm.h> #include <mapmem.h> #include <asm/gpio.h> +#include <asm/io.h> +#include <dm/of_access.h> struct ccsr_gpio { u32 gpdir; @@ -20,6 +23,7 @@ struct ccsr_gpio { u32 gpier; u32 gpimr; u32 gpicr; + u32 gpibe; }; struct mpc8xxx_gpio_data { @@ -35,6 +39,7 @@ struct mpc8xxx_gpio_data { */ u32 dat_shadow; ulong type; + bool little_endian; }; enum { @@ -47,33 +52,56 @@ inline u32 gpio_mask(uint gpio) return (1U << (31 - (gpio))); } -static inline u32 mpc8xxx_gpio_get_val(struct ccsr_gpio *base, u32 mask) +static inline u32 mpc8xxx_gpio_get_val(struct udevice *dev, u32 mask) { - return in_be32(&base->gpdat) & mask; + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); + + if (data->little_endian) + return in_le32(&data->base->gpdat) & mask; + else + return in_be32(&data->base->gpdat) & mask; } -static inline u32 mpc8xxx_gpio_get_dir(struct ccsr_gpio *base, u32 mask) +static inline u32 mpc8xxx_gpio_get_dir(struct udevice *dev, u32 mask) { - return in_be32(&base->gpdir) & mask; + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); + + if (data->little_endian) + return in_le32(&data->base->gpdir) & mask; + else + return in_be32(&data->base->gpdir) & mask; } -static inline int mpc8xxx_gpio_open_drain_val(struct ccsr_gpio *base, u32 mask) +static inline int mpc8xxx_gpio_open_drain_val(struct udevice *dev, u32 mask) { - return in_be32(&base->gpodr) & mask; + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); + + if (data->little_endian) + return in_le32(&data->base->gpodr) & mask; + else + return in_be32(&data->base->gpodr) & mask; } -static inline void mpc8xxx_gpio_open_drain_on(struct ccsr_gpio *base, u32 +static inline void mpc8xxx_gpio_open_drain_on(struct udevice *dev, u32 gpios) { + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); /* GPODR register 1 -> open drain on */ - setbits_be32(&base->gpodr, gpios); + if (data->little_endian) + setbits_le32(&data->base->gpodr, gpios); + else + setbits_be32(&data->base->gpodr, gpios); } -static inline void mpc8xxx_gpio_open_drain_off(struct ccsr_gpio *base, +static inline void mpc8xxx_gpio_open_drain_off(struct udevice *dev, u32 gpios) { + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); /* GPODR register 0 -> open drain off (actively driven) */ - clrbits_be32(&base->gpodr, gpios); + if (data->little_endian) + clrbits_le32(&data->base->gpodr, gpios); + else + clrbits_be32(&data->base->gpodr, gpios); } static int mpc8xxx_gpio_direction_input(struct udevice *dev, uint gpio) @@ -82,7 +110,10 @@ static int mpc8xxx_gpio_direction_input(struct udevice *dev, uint gpio) u32 mask = gpio_mask(gpio); /* GPDIR register 0 -> input */ - clrbits_be32(&data->base->gpdir, mask); + if (data->little_endian) + clrbits_le32(&data->base->gpdir, mask); + else + clrbits_be32(&data->base->gpdir, mask); return 0; } @@ -100,10 +131,20 @@ static int mpc8xxx_gpio_set_value(struct udevice *dev, uint gpio, int value) data->dat_shadow &= ~mask; } - gpdir = 
in_be32(&base->gpdir); + if (data->little_endian) + gpdir = in_le32(&base->gpdir); + else + gpdir = in_be32(&base->gpdir); + gpdir |= gpio_mask(gpio); - out_be32(&base->gpdat, gpdir & data->dat_shadow); - out_be32(&base->gpdir, gpdir); + + if (data->little_endian) { + out_le32(&base->gpdat, gpdir & data->dat_shadow); + out_le32(&base->gpdir, gpdir); + } else { + out_be32(&base->gpdat, gpdir & data->dat_shadow); + out_be32(&base->gpdir, gpdir); + } return 0; } @@ -124,21 +165,20 @@ static int mpc8xxx_gpio_get_value(struct udevice *dev, uint gpio) { struct mpc8xxx_gpio_data *data = dev_get_priv(dev); - if (!!mpc8xxx_gpio_get_dir(data->base, gpio_mask(gpio))) { + if (!!mpc8xxx_gpio_get_dir(dev, gpio_mask(gpio))) { /* Output -> use shadowed value */ return !!(data->dat_shadow & gpio_mask(gpio)); } /* Input -> read value from GPDAT register */ - return !!mpc8xxx_gpio_get_val(data->base, gpio_mask(gpio)); + return !!mpc8xxx_gpio_get_val(dev, gpio_mask(gpio)); } static int mpc8xxx_gpio_get_function(struct udevice *dev, uint gpio) { - struct mpc8xxx_gpio_data *data = dev_get_priv(dev); int dir; - dir = !!mpc8xxx_gpio_get_dir(data->base, gpio_mask(gpio)); + dir = !!mpc8xxx_gpio_get_dir(dev, gpio_mask(gpio)); return dir ? GPIOF_OUTPUT : GPIOF_INPUT; } @@ -146,14 +186,33 @@ static int mpc8xxx_gpio_get_function(struct udevice *dev, uint gpio) static int mpc8xxx_gpio_ofdata_to_platdata(struct udevice *dev) { struct mpc8xxx_gpio_plat *plat = dev_get_platdata(dev); + struct mpc8xxx_gpio_data *data = dev_get_priv(dev); fdt_addr_t addr; - u32 reg[2]; + u32 i; + u32 reg[4]; + + if (ofnode_read_bool(dev->node, "little-endian")) + data->little_endian = true; + + if (data->little_endian) + dev_read_u32_array(dev, "reg", reg, 4); + else + dev_read_u32_array(dev, "reg", reg, 2); + + if (data->little_endian) { + for (i = 0; i < 2; i++) + reg[i] = be32_to_cpu(reg[i]); + } - dev_read_u32_array(dev, "reg", reg, 2); addr = dev_translate_address(dev, reg); plat->addr = addr; - plat->size = reg[1]; + + if (data->little_endian) + plat->size = reg[3]; + else + plat->size = reg[1]; + plat->ngpios = dev_read_u32_default(dev, "ngpios", 32); return 0; @@ -198,6 +257,13 @@ static int mpc8xxx_gpio_probe(struct udevice *dev) if (!str) return -ENOMEM; + if (ofnode_device_is_compatible(dev->node, "fsl,qoriq-gpio")) { + unsigned long gpibe = data->addr + sizeof(struct ccsr_gpio) + - sizeof(u32); + + out_be32((unsigned int *)gpibe, 0xffffffff); + } + uc_priv->bank_name = str; uc_priv->gpio_count = data->gpio_count; diff --git a/drivers/gpio/sandbox.c b/drivers/gpio/sandbox.c index c2f80472b83..eb2600de311 100644 --- a/drivers/gpio/sandbox.c +++ b/drivers/gpio/sandbox.c @@ -185,7 +185,15 @@ static int sb_gpio_set_dir_flags(struct udevice *dev, unsigned int offset, dir_flags = get_gpio_dir_flags(dev, offset); - *dir_flags = flags; + /* + * For testing purposes keep the output value when switching to input. + * This allows us to manipulate the input value via the gpio command. + */ + if (flags & GPIOD_IS_IN) + *dir_flags = (flags & ~GPIOD_IS_OUT_ACTIVE) | + (*dir_flags & GPIOD_IS_OUT_ACTIVE); + else + *dir_flags = flags; return 0; } diff --git a/drivers/gpio/stm32_gpio.c b/drivers/gpio/stm32_gpio.c index 473e364796f..b885cfb57e5 100644 --- a/drivers/gpio/stm32_gpio.c +++ b/drivers/gpio/stm32_gpio.c @@ -18,6 +18,8 @@ #include <linux/errno.h> #include <linux/io.h> +#define STM32_GPIOS_PER_BANK 16 + #define MODE_BITS(gpio_pin) ((gpio_pin) * 2) #define MODE_BITS_MASK 3 #define BSRR_BIT(gpio_pin, value) BIT((gpio_pin) + (value ? 
0 : 16)) diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index bd248cbf52b..b37198036c0 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_DM_I2C) += i2c-uclass.o ifdef CONFIG_ACPIGEN obj-$(CONFIG_DM_I2C) += acpi_i2c.o endif -obj-$(CONFIG_DM_I2C_GPIO) += i2c-gpio.o +obj-$(CONFIG_$(SPL_)DM_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_$(SPL_)I2C_CROS_EC_TUNNEL) += cros_ec_tunnel.o obj-$(CONFIG_$(SPL_)I2C_CROS_EC_LDO) += cros_ec_ldo.o diff --git a/drivers/i2c/designware_i2c.c b/drivers/i2c/designware_i2c.c index 569a5d39b49..791f32e971c 100644 --- a/drivers/i2c/designware_i2c.c +++ b/drivers/i2c/designware_i2c.c @@ -807,8 +807,8 @@ int designware_i2c_probe(struct udevice *bus) return -ENXIO; } - log_info("I2C bus %s version %#x\n", bus->name, - readl(&priv->regs->comp_version)); + log_debug("I2C bus %s version %#x\n", bus->name, + readl(&priv->regs->comp_version)); return __dw_i2c_init(priv->regs, 0, 0); } diff --git a/drivers/i2c/i2c-emul-uclass.c b/drivers/i2c/i2c-emul-uclass.c index 1b70e140545..84b6a219d19 100644 --- a/drivers/i2c/i2c-emul-uclass.c +++ b/drivers/i2c/i2c-emul-uclass.c @@ -76,7 +76,9 @@ UCLASS_DRIVER(i2c_emul) = { UCLASS_DRIVER(i2c_emul_parent) = { .id = UCLASS_I2C_EMUL_PARENT, .name = "i2c_emul_parent", +#if !CONFIG_IS_ENABLED(OF_PLATDATA) .post_bind = dm_scan_fdt_dev, +#endif }; static const struct udevice_id i2c_emul_parent_ids[] = { diff --git a/drivers/i2c/i2c-gpio.c b/drivers/i2c/i2c-gpio.c index cfdeadc752c..381938c956f 100644 --- a/drivers/i2c/i2c-gpio.c +++ b/drivers/i2c/i2c-gpio.c @@ -18,8 +18,6 @@ #define I2C_ACK 0 #define I2C_NOACK 1 -DECLARE_GLOBAL_DATA_PTR; - enum { PIN_SDA = 0, PIN_SCL, @@ -334,8 +332,6 @@ static int i2c_gpio_drv_probe(struct udevice *dev) static int i2c_gpio_ofdata_to_platdata(struct udevice *dev) { struct i2c_gpio_bus *bus = dev_get_priv(dev); - const void *blob = gd->fdt_blob; - int node = dev_of_offset(dev); int ret; ret = gpio_request_list_by_name(dev, "gpios", bus->gpios, @@ -343,12 +339,12 @@ static int i2c_gpio_ofdata_to_platdata(struct udevice *dev) if (ret < 0) goto error; - bus->udelay = fdtdec_get_int(blob, node, "i2c-gpio,delay-us", - DEFAULT_UDELAY); + bus->udelay = dev_read_u32_default(dev, "i2c-gpio,delay-us", + DEFAULT_UDELAY); bus->get_sda = i2c_gpio_sda_get; bus->set_sda = i2c_gpio_sda_set; - if (fdtdec_get_bool(blob, node, "i2c-gpio,scl-output-only")) + if (dev_read_bool(dev, "i2c-gpio,scl-output-only")) bus->set_scl = i2c_gpio_scl_set_output_only; else bus->set_scl = i2c_gpio_scl_set; diff --git a/drivers/i2c/sandbox_i2c.c b/drivers/i2c/sandbox_i2c.c index 57b1c60fde6..2cbdaf9cc73 100644 --- a/drivers/i2c/sandbox_i2c.c +++ b/drivers/i2c/sandbox_i2c.c @@ -93,8 +93,8 @@ static const struct udevice_id sandbox_i2c_ids[] = { { } }; -U_BOOT_DRIVER(i2c_sandbox) = { - .name = "i2c_sandbox", +U_BOOT_DRIVER(sandbox_i2c) = { + .name = "sandbox_i2c", .id = UCLASS_I2C, .of_match = sandbox_i2c_ids, .ops = &sandbox_i2c_ops, diff --git a/drivers/led/led_gpio.c b/drivers/led/led_gpio.c index ef9b61ee626..2cdb0269f41 100644 --- a/drivers/led/led_gpio.c +++ b/drivers/led/led_gpio.c @@ -99,11 +99,8 @@ static int led_gpio_bind(struct udevice *parent) const char *label; label = ofnode_read_string(node, "label"); - if (!label) { - debug("%s: node %s has no label\n", __func__, - ofnode_get_name(node)); - return -EINVAL; - } + if (!label) + label = ofnode_get_name(node); ret = device_bind_driver_to_node(parent, "gpio_led", ofnode_get_name(node), node, &dev); diff --git a/drivers/mailbox/zynqmp-ipi.c 
b/drivers/mailbox/zynqmp-ipi.c index 9483ed9cefc..847a03648b8 100644 --- a/drivers/mailbox/zynqmp-ipi.c +++ b/drivers/mailbox/zynqmp-ipi.c @@ -56,7 +56,7 @@ static int zynqmp_ipi_send(struct mbox_chan *chan, const void *data) /* Wait until observation bit is cleared */ ret = wait_for_bit_le32(&ipi_int_apu->obs, IPI_BIT_MASK_PMU0, false, - 100, false); + 1000, false); debug("%s, send %ld bytes\n", __func__, msg->len); return ret; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index b67e906a76b..29432ae7eb4 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -376,13 +376,6 @@ config SPL_I2C_EEPROM This option is an SPL-variant of the I2C_EEPROM option. See the help of I2C_EEPROM for details. -config ZYNQ_GEM_I2C_MAC_OFFSET - hex "Set the I2C MAC offset" - default 0x0 - depends on DM_I2C - help - Set the MAC offset for i2C. - if I2C_EEPROM config SYS_I2C_EEPROM_ADDR diff --git a/drivers/misc/irq-uclass.c b/drivers/misc/irq-uclass.c index 94fa233f193..24b27962a7d 100644 --- a/drivers/misc/irq-uclass.c +++ b/drivers/misc/irq-uclass.c @@ -69,7 +69,7 @@ int irq_get_by_driver_info(struct udevice *dev, { int ret; - ret = device_get_by_driver_info(cells->node, &irq->dev); + ret = device_get_by_driver_info_idx(cells->idx, &irq->dev); if (ret) return ret; irq->id = cells->arg[0]; diff --git a/drivers/misc/p2sb-uclass.c b/drivers/misc/p2sb-uclass.c index b5219df46be..12abcff2da4 100644 --- a/drivers/misc/p2sb-uclass.c +++ b/drivers/misc/p2sb-uclass.c @@ -174,19 +174,20 @@ int p2sb_set_port_id(struct udevice *dev, int portid) if (!CONFIG_IS_ENABLED(OF_PLATDATA)) return -ENOSYS; - uclass_find_first_device(UCLASS_P2SB, &ps2b); - if (!ps2b) - return -EDEADLK; - dev->parent = ps2b; - - /* - * We must allocate this, since when the device was bound it did not - * have a parent. - * TODO(sjg@chromium.org): Add a parent pointer to child devices in dtoc - */ - dev->parent_platdata = malloc(sizeof(*pplat)); - if (!dev->parent_platdata) - return -ENOMEM; + if (!CONFIG_IS_ENABLED(OF_PLATDATA_PARENT)) { + uclass_find_first_device(UCLASS_P2SB, &ps2b); + if (!ps2b) + return -EDEADLK; + dev->parent = ps2b; + + /* + * We must allocate this, since when the device was bound it did + * not have a parent. 
+ */ + dev->parent_platdata = malloc(sizeof(*pplat)); + if (!dev->parent_platdata) + return -ENOMEM; + } pplat = dev_get_parent_platdata(dev); pplat->pid = portid; diff --git a/drivers/misc/spltest_sandbox.c b/drivers/misc/spltest_sandbox.c index 999031625b5..3ae6707593e 100644 --- a/drivers/misc/spltest_sandbox.c +++ b/drivers/misc/spltest_sandbox.c @@ -8,43 +8,8 @@ #include <dm.h> #include <dt-structs.h> -static int sandbox_spl_probe(struct udevice *dev) -{ - struct dtd_sandbox_spl_test *plat = dev_get_platdata(dev); - int i; - - printf("of-platdata probe:\n"); - printf("bool %d\n", plat->boolval); - - printf("byte %02x\n", plat->byteval); - printf("bytearray"); - for (i = 0; i < sizeof(plat->bytearray); i++) - printf(" %02x", plat->bytearray[i]); - printf("\n"); - - printf("int %d\n", plat->intval); - printf("intarray"); - for (i = 0; i < ARRAY_SIZE(plat->intarray); i++) - printf(" %d", plat->intarray[i]); - printf("\n"); - - printf("longbytearray"); - for (i = 0; i < sizeof(plat->longbytearray); i++) - printf(" %02x", plat->longbytearray[i]); - printf("\n"); - - printf("string %s\n", plat->stringval); - printf("stringarray"); - for (i = 0; i < ARRAY_SIZE(plat->stringarray); i++) - printf(" \"%s\"", plat->stringarray[i]); - printf("\n"); - - return 0; -} - U_BOOT_DRIVER(sandbox_spl_test) = { .name = "sandbox_spl_test", .id = UCLASS_MISC, .flags = DM_FLAG_PRE_RELOC, - .probe = sandbox_spl_probe, }; diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 0c252e34c74..14d79139864 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -46,6 +46,9 @@ config SPL_DM_MMC if MMC +config MMC_SDHCI_ADMA_HELPERS + bool + config MMC_SPI bool "Support for SPI-based MMC controller" depends on DM_MMC && DM_SPI @@ -445,6 +448,7 @@ config MMC_SDHCI_SDMA config MMC_SDHCI_ADMA bool "Support SDHCI ADMA2" depends on MMC_SDHCI + select MMC_SDHCI_ADMA_HELPERS help This enables support for the ADMA (Advanced DMA) defined in the SD Host Controller Standard Specification Version 3.00 @@ -452,6 +456,7 @@ config MMC_SDHCI_ADMA config SPL_MMC_SDHCI_ADMA bool "Support SDHCI ADMA2 in SPL" depends on MMC_SDHCI + select MMC_SDHCI_ADMA_HELPERS help This enables support for the ADMA (Advanced DMA) defined in the SD Host Controller Standard Specification Version 3.00 in SPL. @@ -750,6 +755,14 @@ config FSL_ESDHC This selects support for the eSDHC (Enhanced Secure Digital Host Controller) found on numerous Freescale/NXP SoCs. +config FSL_ESDHC_SUPPORT_ADMA2 + bool "enable ADMA2 support" + depends on FSL_ESDHC + select MMC_SDHCI_ADMA_HELPERS + help + This enables support for the ADMA2 transfer mode. If supported by the + eSDHC it will allow 64bit DMA addresses. 
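For orientation: ADMA2 (SD Host Controller spec v3.00) works from a table of descriptors rather than a single address register, and the shared helpers built as sdhci-adma.o below expose this via sdhci_prepare_adma_table(), which the eSDHC hunks later in this patch call. A sketch of the descriptor format; field and macro names here are assumptions, not U-Boot's exact ones:

```
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>

struct adma2_desc {
	u8 attr;	/* valid/end/action attribute bits */
	u8 reserved;
	u16 len;	/* bytes covered by this entry */
	u32 addr_lo;	/* low 32 bits of the DMA address */
	u32 addr_hi;	/* high 32 bits, for 64-bit capable hosts */
};

#define ADMA2_VALID	BIT(0)
#define ADMA2_END	BIT(1)
#define ADMA2_ACT_TRAN	BIT(5)	/* action = transfer data */

/* Split one mapped buffer into descriptors; the last entry carries
 * the END attribute so the controller knows where the table stops.
 */
static void adma2_fill(struct adma2_desc *desc, dma_addr_t addr,
		       size_t len)
{
	while (len) {
		size_t chunk = min_t(size_t, len, SZ_64K - 1);

		desc->attr = ADMA2_VALID | ADMA2_ACT_TRAN;
		desc->len = chunk;
		desc->addr_lo = lower_32_bits(addr);
		desc->addr_hi = upper_32_bits(addr);
		addr += chunk;
		len -= chunk;
		if (!len)
			desc->attr |= ADMA2_END;
		desc++;
	}
}
```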
+ config FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND bool "enable eSDHC workaround for 3.3v IO reliability issue" depends on FSL_ESDHC && DM_MMC diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 22266ec8ece..1c849cbab2f 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -6,6 +6,7 @@ obj-y += mmc.o obj-$(CONFIG_$(SPL_)DM_MMC) += mmc-uclass.o obj-$(CONFIG_$(SPL_)MMC_WRITE) += mmc_write.o +obj-$(CONFIG_MMC_SDHCI_ADMA_HELPERS) += sdhci-adma.o ifndef CONFIG_$(SPL_)BLK obj-y += mmc_legacy.o diff --git a/drivers/mmc/fsl_esdhc.c b/drivers/mmc/fsl_esdhc.c index de9fe01bc5c..642784e1f35 100644 --- a/drivers/mmc/fsl_esdhc.c +++ b/drivers/mmc/fsl_esdhc.c @@ -26,6 +26,8 @@ #include <dm/device_compat.h> #include <linux/bitops.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <sdhci.h> DECLARE_GLOBAL_DATA_PTR; @@ -51,8 +53,9 @@ struct fsl_esdhc { char reserved1[8]; /* reserved */ uint fevt; /* Force event register */ uint admaes; /* ADMA error status register */ - uint adsaddr; /* ADMA system address register */ - char reserved2[160]; + uint adsaddrl; /* ADMA system address low register */ + uint adsaddrh; /* ADMA system address high register */ + char reserved2[156]; uint hostver; /* Host controller version register */ char reserved3[4]; /* reserved */ uint dmaerraddr; /* DMA error address register */ @@ -60,7 +63,14 @@ struct fsl_esdhc { uint dmaerrattr; /* DMA error attribute register */ char reserved5[4]; /* reserved */ uint hostcapblt2; /* Host controller capabilities register 2 */ - char reserved6[756]; /* reserved */ + char reserved6[8]; /* reserved */ + uint tbctl; /* Tuning block control register */ + char reserved7[32]; /* reserved */ + uint sdclkctl; /* SD clock control register */ + uint sdtimingctl; /* SD timing control register */ + char reserved8[20]; /* reserved */ + uint dllcfg0; /* DLL config 0 register */ + char reserved9[680]; /* reserved */ uint esdhcctl; /* eSDHC control register */ }; @@ -91,6 +101,8 @@ struct fsl_esdhc_priv { struct mmc *mmc; #endif struct udevice *dev; + struct sdhci_adma_desc *adma_desc_table; + dma_addr_t dma_addr; }; /* Return the XFERTYP flags for a given command and data packet */ @@ -100,15 +112,15 @@ static uint esdhc_xfertyp(struct mmc_cmd *cmd, struct mmc_data *data) if (data) { xfertyp |= XFERTYP_DPSEL; -#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO - xfertyp |= XFERTYP_DMAEN; -#endif + if (!IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO) && + cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK && + cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200) + xfertyp |= XFERTYP_DMAEN; if (data->blocks > 1) { xfertyp |= XFERTYP_MSBSEL; xfertyp |= XFERTYP_BCEN; -#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC111 - xfertyp |= XFERTYP_AC12EN; -#endif + if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111)) + xfertyp |= XFERTYP_AC12EN; } if (data->flags & MMC_DATA_READ) @@ -132,7 +144,6 @@ static uint esdhc_xfertyp(struct mmc_cmd *cmd, struct mmc_data *data) return XFERTYP_CMD(cmd->cmdidx) | xfertyp; } -#ifdef CONFIG_SYS_FSL_ESDHC_USE_PIO /* * PIO Read/Write Mode reduce the performace as DMA is not used in this mode. 
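A recurring transformation in the fsl_esdhc changes here: preprocessor conditionals become IS_ENABLED() checks. IS_ENABLED(CONFIG_FOO) evaluates to a compile-time 0 or 1, so the disabled branch is still parsed and type-checked before being eliminated as dead code, which catches bitrot without changing the generated binary. The xfertyp hunk above is representative:

```
/* Before: the disabled branch is invisible to the compiler */
#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC111
	xfertyp |= XFERTYP_AC12EN;
#endif

/* After: both branches always compile; dead code is optimized away */
if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111))
	xfertyp |= XFERTYP_AC12EN;
```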
*/ @@ -195,66 +206,83 @@ static void esdhc_pio_read_write(struct fsl_esdhc_priv *priv, } } } -#endif -static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc, - struct mmc_data *data) +static void esdhc_setup_watermark_level(struct fsl_esdhc_priv *priv, + struct mmc_data *data) { - int timeout; struct fsl_esdhc *regs = priv->esdhc_regs; -#if defined(CONFIG_FSL_LAYERSCAPE) - dma_addr_t addr; -#endif - uint wml_value; - - wml_value = data->blocksize/4; + uint wml_value = data->blocksize / 4; if (data->flags & MMC_DATA_READ) { if (wml_value > WML_RD_WML_MAX) wml_value = WML_RD_WML_MAX_VAL; esdhc_clrsetbits32(®s->wml, WML_RD_WML_MASK, wml_value); -#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO -#if defined(CONFIG_FSL_LAYERSCAPE) - addr = virt_to_phys((void *)(data->dest)); - if (upper_32_bits(addr)) - printf("Error found for upper 32 bits\n"); - else - esdhc_write32(®s->dsaddr, lower_32_bits(addr)); -#else - esdhc_write32(®s->dsaddr, (u32)data->dest); -#endif -#endif } else { -#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO - flush_dcache_range((ulong)data->src, - (ulong)data->src+data->blocks - *data->blocksize); -#endif if (wml_value > WML_WR_WML_MAX) wml_value = WML_WR_WML_MAX_VAL; - if (!(esdhc_read32(®s->prsstat) & PRSSTAT_WPSPL)) { - printf("Can not write to locked SD card.\n"); - return -EINVAL; - } - esdhc_clrsetbits32(®s->wml, WML_WR_WML_MASK, - wml_value << 16); -#ifndef CONFIG_SYS_FSL_ESDHC_USE_PIO -#if defined(CONFIG_FSL_LAYERSCAPE) - addr = virt_to_phys((void *)(data->src)); - if (upper_32_bits(addr)) - printf("Error found for upper 32 bits\n"); - else - esdhc_write32(®s->dsaddr, lower_32_bits(addr)); -#else - esdhc_write32(®s->dsaddr, (u32)data->src); -#endif -#endif + wml_value << 16); + } +} + +static void esdhc_setup_dma(struct fsl_esdhc_priv *priv, struct mmc_data *data) +{ + uint trans_bytes = data->blocksize * data->blocks; + struct fsl_esdhc *regs = priv->esdhc_regs; + phys_addr_t adma_addr; + void *buf; + + if (data->flags & MMC_DATA_WRITE) + buf = (void *)data->src; + else + buf = data->dest; + + priv->dma_addr = dma_map_single(buf, trans_bytes, + mmc_get_dma_dir(data)); + + if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2) && + priv->adma_desc_table) { + debug("Using ADMA2\n"); + /* prefer ADMA2 if it is available */ + sdhci_prepare_adma_table(priv->adma_desc_table, data, + priv->dma_addr); + + adma_addr = virt_to_phys(priv->adma_desc_table); + esdhc_write32(®s->adsaddrl, lower_32_bits(adma_addr)); + if (IS_ENABLED(CONFIG_DMA_ADDR_T_64BIT)) + esdhc_write32(®s->adsaddrh, upper_32_bits(adma_addr)); + esdhc_clrsetbits32(®s->proctl, PROCTL_DMAS_MASK, + PROCTL_DMAS_ADMA2); + } else { + debug("Using SDMA\n"); + if (upper_32_bits(priv->dma_addr)) + printf("Cannot use 64 bit addresses with SDMA\n"); + esdhc_write32(®s->dsaddr, lower_32_bits(priv->dma_addr)); + esdhc_clrsetbits32(®s->proctl, PROCTL_DMAS_MASK, + PROCTL_DMAS_SDMA); } esdhc_write32(®s->blkattr, data->blocks << 16 | data->blocksize); +} + +static int esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc, + struct mmc_data *data) +{ + int timeout; + bool is_write = data->flags & MMC_DATA_WRITE; + struct fsl_esdhc *regs = priv->esdhc_regs; + + if (is_write && !(esdhc_read32(®s->prsstat) & PRSSTAT_WPSPL)) { + printf("Can not write to locked SD card.\n"); + return -EINVAL; + } + + if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO)) + esdhc_setup_watermark_level(priv, data); + else + esdhc_setup_dma(priv, data); /* Calculate the timeout period for data transactions */ /* @@ -287,41 +315,18 @@ static int 
esdhc_setup_data(struct fsl_esdhc_priv *priv, struct mmc *mmc, if (timeout < 0) timeout = 0; -#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC_A001 - if ((timeout == 4) || (timeout == 8) || (timeout == 12)) + if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC_A001) && + (timeout == 4 || timeout == 8 || timeout == 12)) timeout++; -#endif -#ifdef ESDHCI_QUIRK_BROKEN_TIMEOUT_VALUE - timeout = 0xE; -#endif + if (IS_ENABLED(ESDHCI_QUIRK_BROKEN_TIMEOUT_VALUE)) + timeout = 0xE; + esdhc_clrsetbits32(®s->sysctl, SYSCTL_TIMEOUT_MASK, timeout << 16); return 0; } -static void check_and_invalidate_dcache_range - (struct mmc_cmd *cmd, - struct mmc_data *data) { - unsigned start = 0; - unsigned end = 0; - unsigned size = roundup(ARCH_DMA_MINALIGN, - data->blocks*data->blocksize); -#if defined(CONFIG_FSL_LAYERSCAPE) - dma_addr_t addr; - - addr = virt_to_phys((void *)(data->dest)); - if (upper_32_bits(addr)) - printf("Error found for upper 32 bits\n"); - else - start = lower_32_bits(addr); -#else - start = (unsigned)data->dest; -#endif - end = start + size; - invalidate_dcache_range(start, end); -} - /* * Sends a command out on the bus. Takes the mmc pointer, * a command pointer, and an optional data pointer. @@ -336,10 +341,9 @@ static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc, struct fsl_esdhc *regs = priv->esdhc_regs; unsigned long start; -#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC111 - if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION) + if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC111) && + cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION) return 0; -#endif esdhc_write32(®s->irqstat, -1); @@ -365,9 +369,6 @@ static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc, err = esdhc_setup_data(priv, mmc, data); if(err) return err; - - if (data->flags & MMC_DATA_READ) - check_and_invalidate_dcache_range(cmd, data); } /* Figure out the transfer arguments */ @@ -380,6 +381,10 @@ static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc, esdhc_write32(®s->cmdarg, cmd->cmdarg); esdhc_write32(®s->xfertyp, xfertyp); + if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK || + cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) + flags = IRQSTAT_BRR; + /* Wait for the command to complete */ start = get_timer(0); while (!(esdhc_read32(®s->irqstat) & flags)) { @@ -436,32 +441,37 @@ static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc, /* Wait until all of the blocks are transferred */ if (data) { -#ifdef CONFIG_SYS_FSL_ESDHC_USE_PIO - esdhc_pio_read_write(priv, data); -#else - do { - irqstat = esdhc_read32(®s->irqstat); - - if (irqstat & IRQSTAT_DTOE) { - err = -ETIMEDOUT; - goto out; - } + if (IS_ENABLED(CONFIG_SYS_FSL_ESDHC_USE_PIO)) { + esdhc_pio_read_write(priv, data); + } else { + flags = DATA_COMPLETE; + if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK || + cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) + flags = IRQSTAT_BRR; + + do { + irqstat = esdhc_read32(®s->irqstat); - if (irqstat & DATA_ERR) { - err = -ECOMM; - goto out; - } - } while ((irqstat & DATA_COMPLETE) != DATA_COMPLETE); + if (irqstat & IRQSTAT_DTOE) { + err = -ETIMEDOUT; + goto out; + } - /* - * Need invalidate the dcache here again to avoid any - * cache-fill during the DMA operations such as the - * speculative pre-fetching etc. 
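The comment removed above spelled out the cache rule by hand; the replacement code funnels it through the dma_map_single()/dma_unmap_single() pair from the newly added <linux/dma-mapping.h> include. In U-Boot, map flushes or invalidates before the hardware runs, and unmap invalidates again after a read so the CPU cannot pick up stale, speculatively prefetched lines. A condensed sketch of the pattern (do_transfer() is hypothetical):

```
#include <linux/dma-mapping.h>
#include <linux/types.h>

static int do_transfer(void *buf, size_t len, bool is_read)
{
	enum dma_data_direction dir =
		is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t addr = dma_map_single(buf, len, dir);

	/* ... point the controller at 'addr' and run the DMA ... */

	dma_unmap_single(addr, len, dir);

	return 0;
}
```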
- */ - if (data->flags & MMC_DATA_READ) { - check_and_invalidate_dcache_range(cmd, data); + if (irqstat & DATA_ERR) { + err = -ECOMM; + goto out; + } + } while ((irqstat & flags) != flags); + + /* + * Need invalidate the dcache here again to avoid any + * cache-fill during the DMA operations such as the + * speculative pre-fetching etc. + */ + dma_unmap_single(priv->dma_addr, + data->blocks * data->blocksize, + mmc_get_dma_dir(data)); } -#endif } out: @@ -505,6 +515,9 @@ static void set_sysctl(struct fsl_esdhc_priv *priv, struct mmc *mmc, uint clock) while (sdhc_clk / (div * pre_div) > clock && div < 16) div++; + mmc->clock = sdhc_clk / pre_div / div; + priv->clock = mmc->clock; + pre_div >>= 1; div -= 1; @@ -555,6 +568,86 @@ static void esdhc_clock_control(struct fsl_esdhc_priv *priv, bool enable) } } +static void esdhc_flush_async_fifo(struct fsl_esdhc_priv *priv) +{ + struct fsl_esdhc *regs = priv->esdhc_regs; + u32 time_out; + + esdhc_setbits32(®s->esdhcctl, ESDHCCTL_FAF); + + time_out = 20; + while (esdhc_read32(®s->esdhcctl) & ESDHCCTL_FAF) { + if (time_out == 0) { + printf("fsl_esdhc: Flush asynchronous FIFO timeout.\n"); + break; + } + time_out--; + mdelay(1); + } +} + +static void esdhc_tuning_block_enable(struct fsl_esdhc_priv *priv, + bool en) +{ + struct fsl_esdhc *regs = priv->esdhc_regs; + + esdhc_clock_control(priv, false); + esdhc_flush_async_fifo(priv); + if (en) + esdhc_setbits32(®s->tbctl, TBCTL_TB_EN); + else + esdhc_clrbits32(®s->tbctl, TBCTL_TB_EN); + esdhc_clock_control(priv, true); +} + +static void esdhc_exit_hs400(struct fsl_esdhc_priv *priv) +{ + struct fsl_esdhc *regs = priv->esdhc_regs; + + esdhc_clrbits32(®s->sdtimingctl, FLW_CTL_BG); + esdhc_clrbits32(®s->sdclkctl, CMD_CLK_CTL); + + esdhc_clock_control(priv, false); + esdhc_clrbits32(®s->tbctl, HS400_MODE); + esdhc_clock_control(priv, true); + + esdhc_clrbits32(®s->dllcfg0, DLL_FREQ_SEL | DLL_ENABLE); + esdhc_clrbits32(®s->tbctl, HS400_WNDW_ADJUST); + + esdhc_tuning_block_enable(priv, false); +} + +static void esdhc_set_timing(struct fsl_esdhc_priv *priv, enum bus_mode mode) +{ + struct fsl_esdhc *regs = priv->esdhc_regs; + + /* Exit HS400 mode before setting any other mode */ + if (esdhc_read32(®s->tbctl) & HS400_MODE && + mode != MMC_HS_400) + esdhc_exit_hs400(priv); + + esdhc_clock_control(priv, false); + + if (mode == MMC_HS_200) + esdhc_clrsetbits32(®s->autoc12err, UHSM_MASK, + UHSM_SDR104_HS200); + if (mode == MMC_HS_400) { + esdhc_setbits32(®s->tbctl, HS400_MODE); + esdhc_setbits32(®s->sdclkctl, CMD_CLK_CTL); + esdhc_clock_control(priv, true); + + if (priv->clock == 200000000) + esdhc_setbits32(®s->dllcfg0, DLL_FREQ_SEL); + + esdhc_setbits32(®s->dllcfg0, DLL_ENABLE); + esdhc_setbits32(®s->tbctl, HS400_WNDW_ADJUST); + + esdhc_clock_control(priv, false); + esdhc_flush_async_fifo(priv); + } + esdhc_clock_control(priv, true); +} + static int esdhc_set_ios_common(struct fsl_esdhc_priv *priv, struct mmc *mmc) { struct fsl_esdhc *regs = priv->esdhc_regs; @@ -566,10 +659,16 @@ static int esdhc_set_ios_common(struct fsl_esdhc_priv *priv, struct mmc *mmc) esdhc_clock_control(priv, true); } + if (mmc->selected_mode == MMC_HS_400) + esdhc_tuning_block_enable(priv, true); + /* Set the clock speed */ if (priv->clock != mmc->clock) set_sysctl(priv, mmc, mmc->clock); + /* Set timing */ + esdhc_set_timing(priv, mmc->selected_mode); + /* Set the bus width */ esdhc_clrbits32(®s->proctl, PROCTL_DTW_4 | PROCTL_DTW_8); @@ -608,6 +707,9 @@ static int esdhc_init_common(struct fsl_esdhc_priv *priv, struct mmc *mmc) return 
-ETIMEDOUT; } + /* Clean TBCTL[TB_EN] which is not able to be reset by reset all */ + esdhc_clrbits32(®s->tbctl, TBCTL_TB_EN); + esdhc_enable_cache_snooping(regs); esdhc_setbits32(®s->sysctl, SYSCTL_HCKEN | SYSCTL_IPGEN); @@ -648,12 +750,10 @@ static void fsl_esdhc_get_cfg_common(struct fsl_esdhc_priv *priv, u32 caps; caps = esdhc_read32(®s->hostcapblt); -#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC135 - caps &= ~(HOSTCAPBLT_SRS | HOSTCAPBLT_VS18 | HOSTCAPBLT_VS30); -#endif -#ifdef CONFIG_SYS_FSL_MMC_HAS_CAPBLT_VS33 - caps |= HOSTCAPBLT_VS33; -#endif + if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_ESDHC135)) + caps &= ~(HOSTCAPBLT_SRS | HOSTCAPBLT_VS18 | HOSTCAPBLT_VS30); + if (IS_ENABLED(CONFIG_SYS_FSL_MMC_HAS_CAPBLT_VS33)) + caps |= HOSTCAPBLT_VS33; if (caps & HOSTCAPBLT_VS18) cfg->voltages |= MMC_VDD_165_195; if (caps & HOSTCAPBLT_VS30) @@ -674,19 +774,18 @@ static void fsl_esdhc_get_cfg_common(struct fsl_esdhc_priv *priv, #ifdef CONFIG_OF_LIBFDT __weak int esdhc_status_fixup(void *blob, const char *compat) { -#ifdef CONFIG_FSL_ESDHC_PIN_MUX - if (!hwconfig("esdhc")) { + if (IS_ENABLED(CONFIG_FSL_ESDHC_PIN_MUX) && !hwconfig("esdhc")) { do_fixup_by_compat(blob, compat, "status", "disabled", sizeof("disabled"), 1); return 1; } -#endif + return 0; } -#ifdef CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND -static int fsl_esdhc_get_cd(struct udevice *dev); +#if CONFIG_IS_ENABLED(DM_MMC) +static int fsl_esdhc_get_cd(struct udevice *dev); static void esdhc_disable_for_no_card(void *blob) { struct udevice *dev; @@ -705,6 +804,10 @@ static void esdhc_disable_for_no_card(void *blob) sizeof("disabled"), 1); } } +#else +static void esdhc_disable_for_no_card(void *blob) +{ +} #endif void fdt_fixup_esdhc(void *blob, struct bd_info *bd) @@ -713,9 +816,10 @@ void fdt_fixup_esdhc(void *blob, struct bd_info *bd) if (esdhc_status_fixup(blob, compat)) return; -#ifdef CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND - esdhc_disable_for_no_card(blob); -#endif + + if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND)) + esdhc_disable_for_no_card(blob); + do_fixup_by_compat_u32(blob, compat, "clock-frequency", gd->arch.sdhc_clk, 1); } @@ -797,10 +901,9 @@ int fsl_esdhc_initialize(struct bd_info *bis, struct fsl_esdhc_cfg *cfg) printf("No max bus width provided. Assume 8-bit supported.\n"); } -#ifdef CONFIG_ESDHC_DETECT_8_BIT_QUIRK - if (CONFIG_ESDHC_DETECT_8_BIT_QUIRK) + if (IS_ENABLED(CONFIG_ESDHC_DETECT_8_BIT_QUIRK)) mmc_cfg->host_caps &= ~MMC_MODE_8BIT; -#endif + mmc_cfg->ops = &esdhc_ops; fsl_esdhc_get_cfg_common(priv, mmc_cfg); @@ -832,6 +935,7 @@ static int fsl_esdhc_probe(struct udevice *dev) struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev); struct fsl_esdhc_plat *plat = dev_get_platdata(dev); struct fsl_esdhc_priv *priv = dev_get_priv(dev); + u32 caps, hostver; fdt_addr_t addr; struct mmc *mmc; int ret; @@ -846,6 +950,21 @@ static int fsl_esdhc_probe(struct udevice *dev) #endif priv->dev = dev; + if (IS_ENABLED(CONFIG_FSL_ESDHC_SUPPORT_ADMA2)) { + /* + * Only newer eSDHC controllers can do ADMA2 if the ADMA flag + * is set in the host capabilities register. 
+ */ + caps = esdhc_read32(&priv->esdhc_regs->hostcapblt); + hostver = esdhc_read32(&priv->esdhc_regs->hostver); + if (caps & HOSTCAPBLT_DMAS && + HOSTVER_VENDOR(hostver) > VENDOR_V_22) { + priv->adma_desc_table = sdhci_adma_init(); + if (!priv->adma_desc_table) + debug("Could not allocate ADMA tables, falling back to SDMA\n"); + } + } + if (gd->arch.sdhc_per_clk) { priv->sdhc_clk = gd->arch.sdhc_per_clk; priv->is_sdhc_per_clk = true; @@ -872,10 +991,10 @@ static int fsl_esdhc_probe(struct udevice *dev) if (ret) return ret; -#ifdef CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND - if (!fsl_esdhc_get_cd(dev)) + if (IS_ENABLED(CONFIG_FSL_ESDHC_33V_IO_RELIABILITY_WORKAROUND) && + !fsl_esdhc_get_cd(dev)) esdhc_setbits32(&priv->esdhc_regs->proctl, PROCTL_VOLT_SEL); -#endif + return 0; } @@ -907,6 +1026,64 @@ static int fsl_esdhc_set_ios(struct udevice *dev) return esdhc_set_ios_common(priv, &plat->mmc); } +static int fsl_esdhc_reinit(struct udevice *dev) +{ + struct fsl_esdhc_plat *plat = dev_get_platdata(dev); + struct fsl_esdhc_priv *priv = dev_get_priv(dev); + + return esdhc_init_common(priv, &plat->mmc); +} + +#ifdef MMC_SUPPORTS_TUNING +static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode) +{ + struct fsl_esdhc_plat *plat = dev_get_platdata(dev); + struct fsl_esdhc_priv *priv = dev_get_priv(dev); + struct fsl_esdhc *regs = priv->esdhc_regs; + u32 val, irqstaten; + int i; + + esdhc_tuning_block_enable(priv, true); + esdhc_setbits32(®s->autoc12err, EXECUTE_TUNING); + + irqstaten = esdhc_read32(®s->irqstaten); + esdhc_write32(®s->irqstaten, IRQSTATEN_BRR); + + for (i = 0; i < MAX_TUNING_LOOP; i++) { + mmc_send_tuning(&plat->mmc, opcode, NULL); + mdelay(1); + + val = esdhc_read32(®s->autoc12err); + if (!(val & EXECUTE_TUNING)) { + if (val & SMPCLKSEL) + break; + } + } + + esdhc_write32(®s->irqstaten, irqstaten); + + if (i != MAX_TUNING_LOOP) { + if (plat->mmc.hs400_tuning) + esdhc_setbits32(®s->sdtimingctl, FLW_CTL_BG); + return 0; + } + + printf("fsl_esdhc: tuning failed!\n"); + esdhc_clrbits32(®s->autoc12err, SMPCLKSEL); + esdhc_clrbits32(®s->autoc12err, EXECUTE_TUNING); + esdhc_tuning_block_enable(priv, false); + return -ETIMEDOUT; +} +#endif + +int fsl_esdhc_hs400_prepare_ddr(struct udevice *dev) +{ + struct fsl_esdhc_priv *priv = dev_get_priv(dev); + + esdhc_tuning_block_enable(priv, false); + return 0; +} + static const struct dm_mmc_ops fsl_esdhc_ops = { .get_cd = fsl_esdhc_get_cd, .send_cmd = fsl_esdhc_send_cmd, @@ -914,6 +1091,8 @@ static const struct dm_mmc_ops fsl_esdhc_ops = { #ifdef MMC_SUPPORTS_TUNING .execute_tuning = fsl_esdhc_execute_tuning, #endif + .reinit = fsl_esdhc_reinit, + .hs400_prepare_ddr = fsl_esdhc_hs400_prepare_ddr, }; static const struct udevice_id fsl_esdhc_ids[] = { diff --git a/drivers/mmc/fsl_esdhc_imx.c b/drivers/mmc/fsl_esdhc_imx.c index 0c866b168f9..22040c67a84 100644 --- a/drivers/mmc/fsl_esdhc_imx.c +++ b/drivers/mmc/fsl_esdhc_imx.c @@ -462,13 +462,6 @@ static int esdhc_send_cmd_common(struct fsl_esdhc_priv *priv, struct mmc *mmc, while (esdhc_read32(®s->prsstat) & PRSSTAT_DLA) ; - /* Wait at least 8 SD clock cycles before the next command */ - /* - * Note: This is way more than 8 cycles, but 1ms seems to - * resolve timing issues with some cards - */ - udelay(1000); - /* Set up for a data transfer if we have one */ if (data) { err = esdhc_setup_data(priv, mmc, data); @@ -729,7 +722,7 @@ static void esdhc_set_strobe_dll(struct mmc *mmc) u32 val; if (priv->clock > ESDHC_STROBE_DLL_CLK_FREQ) { - writel(ESDHC_STROBE_DLL_CTRL_RESET, 
®s->strobe_dllctrl); + esdhc_write32(®s->strobe_dllctrl, ESDHC_STROBE_DLL_CTRL_RESET); /* * enable strobe dll ctrl and adjust the delay target @@ -738,10 +731,10 @@ static void esdhc_set_strobe_dll(struct mmc *mmc) val = ESDHC_STROBE_DLL_CTRL_ENABLE | (priv->strobe_dll_delay_target << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); - writel(val, ®s->strobe_dllctrl); + esdhc_write32(®s->strobe_dllctrl, val); /* wait 1us to make sure strobe dll status register stable */ mdelay(1); - val = readl(®s->strobe_dllstat); + val = esdhc_read32(®s->strobe_dllstat); if (!(val & ESDHC_STROBE_DLL_STS_REF_LOCK)) pr_warn("HS400 strobe DLL status REF not lock!\n"); if (!(val & ESDHC_STROBE_DLL_STS_SLV_LOCK)) @@ -755,18 +748,18 @@ static int esdhc_set_timing(struct mmc *mmc) struct fsl_esdhc *regs = priv->esdhc_regs; u32 mixctrl; - mixctrl = readl(®s->mixctrl); + mixctrl = esdhc_read32(®s->mixctrl); mixctrl &= ~(MIX_CTRL_DDREN | MIX_CTRL_HS400_EN); switch (mmc->selected_mode) { case MMC_LEGACY: esdhc_reset_tuning(mmc); - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->mixctrl, mixctrl); break; case MMC_HS_400: case MMC_HS_400_ES: mixctrl |= MIX_CTRL_DDREN | MIX_CTRL_HS400_EN; - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->mixctrl, mixctrl); esdhc_set_strobe_dll(mmc); break; case MMC_HS: @@ -777,12 +770,12 @@ static int esdhc_set_timing(struct mmc *mmc) case UHS_SDR25: case UHS_SDR50: case UHS_SDR104: - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->mixctrl, mixctrl); break; case UHS_DDR50: case MMC_DDR_52: mixctrl |= MIX_CTRL_DDREN; - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->mixctrl, mixctrl); break; default: printf("Not supported %d\n", mmc->selected_mode); @@ -862,8 +855,8 @@ static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode) struct fsl_esdhc_priv *priv = dev_get_priv(dev); struct fsl_esdhc *regs = priv->esdhc_regs; struct mmc *mmc = &plat->mmc; - u32 irqstaten = readl(®s->irqstaten); - u32 irqsigen = readl(®s->irqsigen); + u32 irqstaten = esdhc_read32(®s->irqstaten); + u32 irqsigen = esdhc_read32(®s->irqsigen); int i, ret = -ETIMEDOUT; u32 val, mixctrl; @@ -873,25 +866,25 @@ static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode) /* This is readw/writew SDHCI_HOST_CONTROL2 when tuning */ if (priv->flags & ESDHC_FLAG_STD_TUNING) { - val = readl(®s->autoc12err); - mixctrl = readl(®s->mixctrl); + val = esdhc_read32(®s->autoc12err); + mixctrl = esdhc_read32(®s->mixctrl); val &= ~MIX_CTRL_SMPCLK_SEL; mixctrl &= ~(MIX_CTRL_FBCLK_SEL | MIX_CTRL_AUTO_TUNE_EN); val |= MIX_CTRL_EXE_TUNE; mixctrl |= MIX_CTRL_FBCLK_SEL | MIX_CTRL_AUTO_TUNE_EN; - writel(val, ®s->autoc12err); - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->autoc12err, val); + esdhc_write32(®s->mixctrl, mixctrl); } /* sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); */ - mixctrl = readl(®s->mixctrl); + mixctrl = esdhc_read32(®s->mixctrl); mixctrl = MIX_CTRL_DTDSEL_READ | (mixctrl & ~MIX_CTRL_SDHCI_MASK); - writel(mixctrl, ®s->mixctrl); + esdhc_write32(®s->mixctrl, mixctrl); - writel(IRQSTATEN_BRR, ®s->irqstaten); - writel(IRQSTATEN_BRR, ®s->irqsigen); + esdhc_write32(®s->irqstaten, IRQSTATEN_BRR); + esdhc_write32(®s->irqsigen, IRQSTATEN_BRR); /* * Issue opcode repeatedly till Execute Tuning is set to 0 or the number @@ -902,22 +895,22 @@ static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode) if (opcode == MMC_CMD_SEND_TUNING_BLOCK_HS200) { if (mmc->bus_width == 8) - writel(0x7080, ®s->blkattr); + esdhc_write32(®s->blkattr, 0x7080); else if (mmc->bus_width == 4) - 
writel(0x7040, ®s->blkattr); + esdhc_write32(®s->blkattr, 0x7040); } else { - writel(0x7040, ®s->blkattr); + esdhc_write32(®s->blkattr, 0x7040); } /* sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE) */ - val = readl(®s->mixctrl); + val = esdhc_read32(®s->mixctrl); val = MIX_CTRL_DTDSEL_READ | (val & ~MIX_CTRL_SDHCI_MASK); - writel(val, ®s->mixctrl); + esdhc_write32(®s->mixctrl, val); /* We are using STD tuning, no need to check return value */ mmc_send_tuning(mmc, opcode, NULL); - ctrl = readl(®s->autoc12err); + ctrl = esdhc_read32(®s->autoc12err); if ((!(ctrl & MIX_CTRL_EXE_TUNE)) && (ctrl & MIX_CTRL_SMPCLK_SEL)) { ret = 0; @@ -925,8 +918,8 @@ static int fsl_esdhc_execute_tuning(struct udevice *dev, uint32_t opcode) } } - writel(irqstaten, ®s->irqstaten); - writel(irqsigen, ®s->irqsigen); + esdhc_write32(®s->irqstaten, irqstaten); + esdhc_write32(®s->irqsigen, irqsigen); esdhc_stop_tuning(mmc); @@ -1179,7 +1172,7 @@ static int fsl_esdhc_init(struct fsl_esdhc_priv *priv, if (priv->vs18_enable) esdhc_setbits32(®s->vendorspec, ESDHC_VENDORSPEC_VSELECT); - writel(SDHCI_IRQ_EN_BITS, ®s->irqstaten); + esdhc_write32(®s->irqstaten, SDHCI_IRQ_EN_BITS); cfg = &plat->cfg; #ifndef CONFIG_DM_MMC memset(cfg, '\0', sizeof(*cfg)); @@ -1260,10 +1253,10 @@ static int fsl_esdhc_init(struct fsl_esdhc_priv *priv, cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT; - writel(0, ®s->dllctrl); + esdhc_write32(®s->dllctrl, 0); if (priv->flags & ESDHC_FLAG_USDHC) { if (priv->flags & ESDHC_FLAG_STD_TUNING) { - u32 val = readl(®s->tuning_ctrl); + u32 val = esdhc_read32(®s->tuning_ctrl); val |= ESDHC_STD_TUNING_EN; val &= ~ESDHC_TUNING_START_TAP_MASK; @@ -1282,7 +1275,7 @@ static int fsl_esdhc_init(struct fsl_esdhc_priv *priv, * after the whole tuning procedure always can't get any response. 
*/ val |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE; - writel(val, ®s->tuning_ctrl); + esdhc_write32(®s->tuning_ctrl, val); } } @@ -1511,12 +1504,9 @@ static int fsl_esdhc_probe(struct udevice *dev) if (CONFIG_IS_ENABLED(DM_GPIO) && !priv->non_removable) { struct udevice *gpiodev; - struct driver_info *info; - - info = (struct driver_info *)dtplat->cd_gpios->node; - - ret = device_get_by_driver_info(info, &gpiodev); + ret = device_get_by_driver_info_idx(dtplat->cd_gpios->idx, + &gpiodev); if (ret) return ret; @@ -1648,9 +1638,9 @@ static int fsl_esdhc_set_enhanced_strobe(struct udevice *dev) struct fsl_esdhc *regs = priv->esdhc_regs; u32 m; - m = readl(®s->mixctrl); + m = esdhc_read32(®s->mixctrl); m |= MIX_CTRL_HS400_ES; - writel(m, ®s->mixctrl); + esdhc_write32(®s->mixctrl, m); return 0; } diff --git a/drivers/mmc/mmc-uclass.c b/drivers/mmc/mmc-uclass.c index 90690c8d1e3..285ac480615 100644 --- a/drivers/mmc/mmc-uclass.c +++ b/drivers/mmc/mmc-uclass.c @@ -142,6 +142,21 @@ int mmc_set_enhanced_strobe(struct mmc *mmc) } #endif +int dm_mmc_hs400_prepare_ddr(struct udevice *dev) +{ + struct dm_mmc_ops *ops = mmc_get_ops(dev); + + if (ops->hs400_prepare_ddr) + return ops->hs400_prepare_ddr(dev); + + return 0; +} + +int mmc_hs400_prepare_ddr(struct mmc *mmc) +{ + return dm_mmc_hs400_prepare_ddr(mmc->dev); +} + int dm_mmc_host_power_cycle(struct udevice *dev) { struct dm_mmc_ops *ops = mmc_get_ops(dev); @@ -171,6 +186,21 @@ int mmc_deferred_probe(struct mmc *mmc) return dm_mmc_deferred_probe(mmc->dev); } +int dm_mmc_reinit(struct udevice *dev) +{ + struct dm_mmc_ops *ops = mmc_get_ops(dev); + + if (ops->reinit) + return ops->reinit(dev); + + return 0; +} + +int mmc_reinit(struct mmc *mmc) +{ + return dm_mmc_reinit(mmc->dev); +} + int mmc_of_parse(struct udevice *dev, struct mmc_config *cfg) { int val; @@ -198,7 +228,7 @@ int mmc_of_parse(struct udevice *dev, struct mmc_config *cfg) if (dev_read_bool(dev, "cap-sd-highspeed")) cfg->host_caps |= MMC_CAP(SD_HS); if (dev_read_bool(dev, "cap-mmc-highspeed")) - cfg->host_caps |= MMC_CAP(MMC_HS); + cfg->host_caps |= MMC_CAP(MMC_HS) | MMC_CAP(MMC_HS_52); if (dev_read_bool(dev, "sd-uhs-sdr12")) cfg->host_caps |= MMC_CAP(UHS_SDR12); if (dev_read_bool(dev, "sd-uhs-sdr25")) diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index d79cdef62ed..a47700e313c 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c @@ -805,8 +805,10 @@ static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value, * capable of polling by using mmc_wait_dat0, then rely on waiting the * stated timeout to be sufficient. */ - if (ret == -ENOSYS && !send_status) + if (ret == -ENOSYS && !send_status) { mdelay(timeout_ms); + return 0; + } /* Finally wait until the card is ready or indicates a failure * to switch. 
It doesn't hurt to use CMD13 here even if send_status @@ -1982,7 +1984,9 @@ static int mmc_select_hs400(struct mmc *mmc) mmc_set_clock(mmc, mmc->tran_speed, false); /* execute tuning if needed */ + mmc->hs400_tuning = 1; err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200); + mmc->hs400_tuning = 0; if (err) { debug("tuning failed\n"); return err; @@ -1991,6 +1995,10 @@ static int mmc_select_hs400(struct mmc *mmc) /* Set back to HS */ mmc_set_card_speed(mmc, MMC_HS, true); + err = mmc_hs400_prepare_ddr(mmc); + if (err) + return err; + err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG); if (err) @@ -2184,7 +2192,7 @@ static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps) return 0; error: mmc_set_signal_voltage(mmc, old_voltage); - /* if an error occured, revert to a safer bus mode */ + /* if an error occurred, revert to a safer bus mode */ mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1); mmc_select_mode(mmc, MMC_LEGACY); @@ -2816,13 +2824,17 @@ int mmc_get_op_cond(struct mmc *mmc) return err; #if CONFIG_IS_ENABLED(DM_MMC) - /* The device has already been probed ready for use */ + /* + * Re-initialization is needed to clear old configuration for + * mmc rescan. + */ + err = mmc_reinit(mmc); #else /* made sure it's not NULL earlier */ err = mmc->cfg->ops->init(mmc); +#endif if (err) return err; -#endif mmc->ddr_mode = 0; retry: @@ -3111,3 +3123,12 @@ int mmc_set_bkops_enable(struct mmc *mmc) return 0; } #endif + +__weak int mmc_get_env_dev(void) +{ +#ifdef CONFIG_SYS_MMC_ENV_DEV + return CONFIG_SYS_MMC_ENV_DEV; +#else + return 0; +#endif +} diff --git a/drivers/mmc/mtk-sd.c b/drivers/mmc/mtk-sd.c index 30fe7a0aa20..4f9fa7d0ec3 100644 --- a/drivers/mmc/mtk-sd.c +++ b/drivers/mmc/mtk-sd.c @@ -1171,7 +1171,7 @@ skip_fall: internal_delay |= (1 << i); } - dev_err(dev, "Final internal delay: 0x%x\n", internal_delay); + dev_dbg(dev, "Final internal delay: 0x%x\n", internal_delay); internal_delay_phase = get_best_delay(dev, host, internal_delay); clrsetbits_le32(tune_reg, MSDC_PAD_TUNE_CMDRRDLY_M, @@ -1179,7 +1179,7 @@ skip_fall: MSDC_PAD_TUNE_CMDRRDLY_S); skip_internal: - dev_err(dev, "Final cmd pad delay: %x\n", final_delay); + dev_dbg(dev, "Final cmd pad delay: %x\n", final_delay); return final_delay == 0xff ? -EIO : 0; } @@ -1265,7 +1265,7 @@ skip_fall: host->hs200_write_int_delay << MSDC_PAD_TUNE_DATWRDLY_S); - dev_err(dev, "Final data pad delay: %x\n", final_delay); + dev_dbg(dev, "Final data pad delay: %x\n", final_delay); return final_delay == 0xff ? -EIO : 0; } diff --git a/drivers/mmc/octeontx_hsmmc.c b/drivers/mmc/octeontx_hsmmc.c index ddc36694e1e..38ca3736841 100644 --- a/drivers/mmc/octeontx_hsmmc.c +++ b/drivers/mmc/octeontx_hsmmc.c @@ -3638,7 +3638,6 @@ static int octeontx_mmc_slot_probe(struct udevice *dev) struct mmc *mmc; int err; - printk("%s (%d)\n", __func__, __LINE__); // test-only debug("%s(%s)\n", __func__, dev->name); if (!host_probed) { pr_err("%s(%s): Error: host not probed yet\n", diff --git a/drivers/mmc/sdhci-adma.c b/drivers/mmc/sdhci-adma.c new file mode 100644 index 00000000000..2ec057fbb19 --- /dev/null +++ b/drivers/mmc/sdhci-adma.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * SDHCI ADMA2 helper functions. 
+ */ + +#include <common.h> +#include <cpu_func.h> +#include <sdhci.h> +#include <malloc.h> +#include <asm/cache.h> + +static void sdhci_adma_desc(struct sdhci_adma_desc *desc, + dma_addr_t addr, u16 len, bool end) +{ + u8 attr; + + attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA; + if (end) + attr |= ADMA_DESC_ATTR_END; + + desc->attr = attr; + desc->len = len; + desc->reserved = 0; + desc->addr_lo = lower_32_bits(addr); +#ifdef CONFIG_DMA_ADDR_T_64BIT + desc->addr_hi = upper_32_bits(addr); +#endif +} + +/** + * sdhci_prepare_adma_table() - Populate the ADMA table + * + * @table: Pointer to the ADMA table + * @data: Pointer to MMC data + * @addr: DMA address to write to or read from + * + * Fill the ADMA table according to the MMC data to read from or write to the + * given DMA address. + * Please note, that the table size depends on CONFIG_SYS_MMC_MAX_BLK_COUNT and + * we don't have to check for overflow. + */ +void sdhci_prepare_adma_table(struct sdhci_adma_desc *table, + struct mmc_data *data, dma_addr_t addr) +{ + uint trans_bytes = data->blocksize * data->blocks; + uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN); + struct sdhci_adma_desc *desc = table; + int i = desc_count; + + while (--i) { + sdhci_adma_desc(desc, addr, ADMA_MAX_LEN, false); + addr += ADMA_MAX_LEN; + trans_bytes -= ADMA_MAX_LEN; + desc++; + } + + sdhci_adma_desc(desc, addr, trans_bytes, true); + + flush_cache((dma_addr_t)table, + ROUND(desc_count * sizeof(struct sdhci_adma_desc), + ARCH_DMA_MINALIGN)); +} + +/** + * sdhci_adma_init() - initialize the ADMA descriptor table + * + * @return pointer to the allocated descriptor table or NULL in case of an + * error. + */ +struct sdhci_adma_desc *sdhci_adma_init(void) +{ + return memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ); +} diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index 7673219fb33..06289343124 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c @@ -69,57 +69,6 @@ static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data) } } -#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA) -static void sdhci_adma_desc(struct sdhci_host *host, dma_addr_t dma_addr, - u16 len, bool end) -{ - struct sdhci_adma_desc *desc; - u8 attr; - - desc = &host->adma_desc_table[host->desc_slot]; - - attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA; - if (!end) - host->desc_slot++; - else - attr |= ADMA_DESC_ATTR_END; - - desc->attr = attr; - desc->len = len; - desc->reserved = 0; - desc->addr_lo = lower_32_bits(dma_addr); -#ifdef CONFIG_DMA_ADDR_T_64BIT - desc->addr_hi = upper_32_bits(dma_addr); -#endif -} - -static void sdhci_prepare_adma_table(struct sdhci_host *host, - struct mmc_data *data) -{ - uint trans_bytes = data->blocksize * data->blocks; - uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN); - int i = desc_count; - dma_addr_t dma_addr = host->start_addr; - - host->desc_slot = 0; - - while (--i) { - sdhci_adma_desc(host, dma_addr, ADMA_MAX_LEN, false); - dma_addr += ADMA_MAX_LEN; - trans_bytes -= ADMA_MAX_LEN; - } - - sdhci_adma_desc(host, dma_addr, trans_bytes, true); - - flush_cache((dma_addr_t)host->adma_desc_table, - ROUND(desc_count * sizeof(struct sdhci_adma_desc), - ARCH_DMA_MINALIGN)); -} -#elif defined(CONFIG_MMC_SDHCI_SDMA) -static void sdhci_prepare_adma_table(struct sdhci_host *host, - struct mmc_data *data) -{} -#endif #if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)) static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data, int *is_aligned, int trans_bytes) @@ -156,8 +105,11 
@@ static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data, if (host->flags & USE_SDMA) { sdhci_writel(host, phys_to_bus((ulong)host->start_addr), SDHCI_DMA_ADDRESS); - } else if (host->flags & (USE_ADMA | USE_ADMA64)) { - sdhci_prepare_adma_table(host, data); + } +#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA) + else if (host->flags & (USE_ADMA | USE_ADMA64)) { + sdhci_prepare_adma_table(host->adma_desc_table, data, + host->start_addr); sdhci_writel(host, lower_32_bits(host->adma_addr), SDHCI_ADMA_ADDRESS); @@ -165,6 +117,7 @@ static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data, sdhci_writel(host, upper_32_bits(host->adma_addr), SDHCI_ADMA_ADDRESS_HI); } +#endif } #else static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data, @@ -770,9 +723,9 @@ int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host, __func__); return -EINVAL; } - host->adma_desc_table = memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ); - + host->adma_desc_table = sdhci_adma_init(); host->adma_addr = (dma_addr_t)host->adma_desc_table; + #ifdef CONFIG_DMA_ADDR_T_64BIT host->flags |= USE_ADMA64; #else @@ -859,7 +812,8 @@ int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host, cfg->host_caps &= ~MMC_MODE_HS_52MHz; } - if (!(cfg->voltages & MMC_VDD_165_195)) + if (!(cfg->voltages & MMC_VDD_165_195) || + (host->quirks & SDHCI_QUIRK_NO_1_8_V)) caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50); diff --git a/drivers/mmc/sh_sdhi.c b/drivers/mmc/sh_sdhi.c index 315f95cce82..29f83b65542 100644 --- a/drivers/mmc/sh_sdhi.c +++ b/drivers/mmc/sh_sdhi.c @@ -784,8 +784,7 @@ int sh_sdhi_init(unsigned long addr, int ch, unsigned long quirks) return ret; error: - if (host) - free(host); + free(host); return ret; } diff --git a/drivers/mmc/stm32_sdmmc2.c b/drivers/mmc/stm32_sdmmc2.c index 6d503562171..77871d5afc9 100644 --- a/drivers/mmc/stm32_sdmmc2.c +++ b/drivers/mmc/stm32_sdmmc2.c @@ -676,27 +676,13 @@ static int stm32_sdmmc2_probe(struct udevice *dev) GPIOD_IS_IN); cfg->f_min = 400000; - cfg->f_max = dev_read_u32_default(dev, "max-frequency", 52000000); cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT; cfg->name = "STM32 SD/MMC"; cfg->host_caps = 0; - if (cfg->f_max > 25000000) - cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS; - - switch (dev_read_u32_default(dev, "bus-width", 1)) { - case 8: - cfg->host_caps |= MMC_MODE_8BIT; - /* fall through */ - case 4: - cfg->host_caps |= MMC_MODE_4BIT; - break; - case 1: - break; - default: - pr_err("invalid \"bus-width\" property, force to 1\n"); - } + cfg->f_max = 52000000; + mmc_of_parse(dev, cfg); upriv->mmc = &plat->mmc; diff --git a/drivers/mmc/zynq_sdhci.c b/drivers/mmc/zynq_sdhci.c index 775c17baac5..147ecc0d708 100644 --- a/drivers/mmc/zynq_sdhci.c +++ b/drivers/mmc/zynq_sdhci.c @@ -19,6 +19,20 @@ #include <sdhci.h> #include <zynqmp_tap_delay.h> +#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8 +#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC +#define SDHCI_ITAPDLY_CHGWIN 0x200 +#define SDHCI_ITAPDLY_ENABLE 0x100 +#define SDHCI_OTAPDLY_ENABLE 0x40 + +#define SDHCI_TUNING_LOOP_COUNT 40 +#define MMC_BANK2 0x2 + +struct arasan_sdhci_clk_data { + int clk_phase_in[MMC_TIMING_MMC_HS400 + 1]; + int clk_phase_out[MMC_TIMING_MMC_HS400 + 1]; +}; + struct arasan_sdhci_plat { struct mmc_config cfg; struct mmc mmc; @@ -26,29 +40,35 @@ struct arasan_sdhci_plat { struct arasan_sdhci_priv { struct sdhci_host *host; + struct arasan_sdhci_clk_data 
clk_data;
 	u8 deviceid;
 	u8 bank;
+	u8 no_1p8;
 };
 
-#if defined(CONFIG_ARCH_ZYNQMP)
-#define MMC_HS200_BUS_SPEED	5
+#if defined(CONFIG_ARCH_ZYNQMP) || defined(CONFIG_ARCH_VERSAL)
+/* Default settings for ZynqMP Clock Phases */
+const u32 zynqmp_iclk_phases[] = {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0};
+const u32 zynqmp_oclk_phases[] = {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0};
+
+/* Default settings for Versal Clock Phases */
+const u32 versal_iclk_phases[] = {0, 132, 132, 0, 132, 0, 0, 162, 90, 0, 0};
+const u32 versal_oclk_phases[] = {0, 60, 48, 0, 48, 72, 90, 36, 60, 90, 0};
 
 static const u8 mode2timing[] = {
-	[MMC_LEGACY] = UHS_SDR12_BUS_SPEED,
-	[MMC_HS] = HIGH_SPEED_BUS_SPEED,
-	[SD_HS] = HIGH_SPEED_BUS_SPEED,
-	[MMC_HS_52] = HIGH_SPEED_BUS_SPEED,
-	[MMC_DDR_52] = HIGH_SPEED_BUS_SPEED,
-	[UHS_SDR12] = UHS_SDR12_BUS_SPEED,
-	[UHS_SDR25] = UHS_SDR25_BUS_SPEED,
-	[UHS_SDR50] = UHS_SDR50_BUS_SPEED,
-	[UHS_DDR50] = UHS_DDR50_BUS_SPEED,
-	[UHS_SDR104] = UHS_SDR104_BUS_SPEED,
-	[MMC_HS_200] = MMC_HS200_BUS_SPEED,
+	[MMC_LEGACY] = MMC_TIMING_LEGACY,
+	[MMC_HS] = MMC_TIMING_MMC_HS,
+	[SD_HS] = MMC_TIMING_SD_HS,
+	[MMC_HS_52] = MMC_TIMING_UHS_SDR50,
+	[MMC_DDR_52] = MMC_TIMING_UHS_DDR50,
+	[UHS_SDR12] = MMC_TIMING_UHS_SDR12,
+	[UHS_SDR25] = MMC_TIMING_UHS_SDR25,
+	[UHS_SDR50] = MMC_TIMING_UHS_SDR50,
+	[UHS_DDR50] = MMC_TIMING_UHS_DDR50,
+	[UHS_SDR104] = MMC_TIMING_UHS_SDR104,
+	[MMC_HS_200] = MMC_TIMING_MMC_HS200,
 };
 
-#define SDHCI_TUNING_LOOP_COUNT	40
-
 static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u8 deviceid)
 {
 	u16 clk;
@@ -156,17 +176,352 @@ static int arasan_sdhci_execute_tuning(struct mmc *mmc, u8 opcode)
 	return 0;
 }
 
+/**
+ * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @host:	Pointer to the sdhci_host structure.
+ * @degrees:	The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sdcardclk_set_phase(struct sdhci_host *host,
+					    int degrees)
+{
+	struct arasan_sdhci_priv *priv = dev_get_priv(host->mmc->dev);
+	struct mmc *mmc = (struct mmc *)host->mmc;
+	u8 tap_delay, tap_max = 0;
+	int timing = mode2timing[mmc->selected_mode];
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * ZynqMP does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (SDHCI_GET_VERSION(host) < SDHCI_SPEC_300 ||
+	    timing == MMC_TIMING_LEGACY ||
+	    timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 15 Taps are available */
+		tap_max = 15;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 8 Taps are available */
+		tap_max = 8;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	arasan_zynqmp_set_tapdelay(priv->deviceid, 0, tap_delay);
+
+	return 0;
+}
+
+/**
+ * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @host:	Pointer to the sdhci_host structure.
+ * @degrees:	The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sampleclk_set_phase(struct sdhci_host *host,
+					    int degrees)
+{
+	struct arasan_sdhci_priv *priv = dev_get_priv(host->mmc->dev);
+	struct mmc *mmc = (struct mmc *)host->mmc;
+	u8 tap_delay, tap_max = 0;
+	int timing = mode2timing[mmc->selected_mode];
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * ZynqMP does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (SDHCI_GET_VERSION(host) < SDHCI_SPEC_300 ||
+	    timing == MMC_TIMING_LEGACY ||
+	    timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 120 Taps are available */
+		tap_max = 120;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 60 Taps are available */
+		tap_max = 60;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	arasan_zynqmp_set_tapdelay(priv->deviceid, tap_delay, 0);
+
+	return 0;
+}
+
+/**
+ * sdhci_versal_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @host:	Pointer to the sdhci_host structure.
+ * @degrees:	The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sdcardclk_set_phase(struct sdhci_host *host,
+					    int degrees)
+{
+	struct mmc *mmc = (struct mmc *)host->mmc;
+	u8 tap_delay, tap_max = 0;
+	int timing = mode2timing[mmc->selected_mode];
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * Versal does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (SDHCI_GET_VERSION(host) < SDHCI_SPEC_300 ||
+	    timing == MMC_TIMING_LEGACY ||
+	    timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 15 Taps are available */
+		tap_max = 15;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 8 Taps are available */
+		tap_max = 8;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	/* Set the Clock Phase */
+	if (tap_delay) {
+		u32 regval;
+
+		regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+		regval |= SDHCI_OTAPDLY_ENABLE;
+		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+		regval |= tap_delay;
+		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+	}
+
+	return 0;
+}
+
+/**
+ * sdhci_versal_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @host:	Pointer to the sdhci_host structure.
+ * @degrees:	The clock phase shift between 0 - 359.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sampleclk_set_phase(struct sdhci_host *host,
+					    int degrees)
+{
+	struct mmc *mmc = (struct mmc *)host->mmc;
+	u8 tap_delay, tap_max = 0;
+	int timing = mode2timing[mmc->selected_mode];
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above.
+	 * Versal does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (SDHCI_GET_VERSION(host) < SDHCI_SPEC_300 ||
+	    timing == MMC_TIMING_LEGACY ||
+	    timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 120 Taps are available */
+		tap_max = 120;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 60 Taps are available */
+		tap_max = 60;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	/* Set the Clock Phase */
+	if (tap_delay) {
+		u32 regval;
+
+		regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_ENABLE;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= tap_delay;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval &= ~SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+	}
+
+	return 0;
+}
+
 static void arasan_sdhci_set_tapdelay(struct sdhci_host *host)
 {
 	struct arasan_sdhci_priv *priv = dev_get_priv(host->mmc->dev);
+	struct arasan_sdhci_clk_data *clk_data = &priv->clk_data;
 	struct mmc *mmc = (struct mmc *)host->mmc;
-	u8 uhsmode;
+	struct udevice *dev = mmc->dev;
+	u8 timing = mode2timing[mmc->selected_mode];
+	u32 iclk_phase = clk_data->clk_phase_in[timing];
+	u32 oclk_phase = clk_data->clk_phase_out[timing];
+
+	dev_dbg(dev, "%s, host:%s, mode:%d\n", __func__, host->name, timing);
+
+	if (IS_ENABLED(CONFIG_ARCH_ZYNQMP) &&
+	    device_is_compatible(dev, "xlnx,zynqmp-8.9a")) {
+		sdhci_zynqmp_sampleclk_set_phase(host, iclk_phase);
+		sdhci_zynqmp_sdcardclk_set_phase(host, oclk_phase);
+	} else if (IS_ENABLED(CONFIG_ARCH_VERSAL) &&
+		   device_is_compatible(dev, "xlnx,versal-8.9a")) {
+		sdhci_versal_sampleclk_set_phase(host, iclk_phase);
+		sdhci_versal_sdcardclk_set_phase(host, oclk_phase);
+	}
+}
 
-	uhsmode = mode2timing[mmc->selected_mode];
+static void arasan_dt_read_clk_phase(struct udevice *dev, unsigned char timing,
+				     const char *prop)
+{
+	struct arasan_sdhci_priv *priv = dev_get_priv(dev);
+	struct arasan_sdhci_clk_data *clk_data = &priv->clk_data;
+	u32 clk_phase[2] = {0};
+
+	/*
+	 * Read the Tap Delay values from DT; if the DT does not contain the
+	 * Tap Values, then use the pre-defined values.
+	 */
+	if (dev_read_u32_array(dev, prop, &clk_phase[0], 2)) {
+		dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
+			prop, clk_data->clk_phase_in[timing],
+			clk_data->clk_phase_out[timing]);
+		return;
+	}
 
-	if (uhsmode >= UHS_SDR25_BUS_SPEED)
-		arasan_zynqmp_set_tapdelay(priv->deviceid, uhsmode,
-					   priv->bank);
+	/* The values read are Input and Output Clock Delays in order */
+	clk_data->clk_phase_in[timing] = clk_phase[0];
+	clk_data->clk_phase_out[timing] = clk_phase[1];
+}
+
+/**
+ * arasan_dt_parse_clk_phases - Read Tap Delay values from DT
+ *
+ * Called at initialization to parse the values of Tap Delays.
+ *
+ * @dev:	Pointer to our struct udevice.
+ */ +static void arasan_dt_parse_clk_phases(struct udevice *dev) +{ + struct arasan_sdhci_priv *priv = dev_get_priv(dev); + struct arasan_sdhci_clk_data *clk_data = &priv->clk_data; + int i; + + if (IS_ENABLED(CONFIG_ARCH_ZYNQMP) && + device_is_compatible(dev, "xlnx,zynqmp-8.9a")) { + for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) { + clk_data->clk_phase_in[i] = zynqmp_iclk_phases[i]; + clk_data->clk_phase_out[i] = zynqmp_oclk_phases[i]; + } + + if (priv->bank == MMC_BANK2) { + clk_data->clk_phase_out[MMC_TIMING_UHS_SDR104] = 90; + clk_data->clk_phase_out[MMC_TIMING_MMC_HS200] = 90; + } + } + + if (IS_ENABLED(CONFIG_ARCH_VERSAL) && + device_is_compatible(dev, "xlnx,versal-8.9a")) { + for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) { + clk_data->clk_phase_in[i] = versal_iclk_phases[i]; + clk_data->clk_phase_out[i] = versal_oclk_phases[i]; + } + } + + arasan_dt_read_clk_phase(dev, MMC_TIMING_LEGACY, + "clk-phase-legacy"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_MMC_HS, + "clk-phase-mmc-hs"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_SD_HS, + "clk-phase-sd-hs"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_UHS_SDR12, + "clk-phase-uhs-sdr12"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_UHS_SDR25, + "clk-phase-uhs-sdr25"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_UHS_SDR50, + "clk-phase-uhs-sdr50"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_UHS_SDR104, + "clk-phase-uhs-sdr104"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_UHS_DDR50, + "clk-phase-uhs-ddr50"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_MMC_DDR52, + "clk-phase-mmc-ddr52"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_MMC_HS200, + "clk-phase-mmc-hs200"); + arasan_dt_read_clk_phase(dev, MMC_TIMING_MMC_HS400, + "clk-phase-mmc-hs400"); } static void arasan_sdhci_set_control_reg(struct sdhci_host *host) @@ -184,12 +539,10 @@ static void arasan_sdhci_set_control_reg(struct sdhci_host *host) } if (mmc->selected_mode > SD_HS && - mmc->selected_mode <= UHS_DDR50) + mmc->selected_mode <= MMC_HS_200) sdhci_set_uhs_timing(host); } -#endif -#if defined(CONFIG_ARCH_ZYNQMP) const struct sdhci_ops arasan_ops = { .platform_execute_tuning = &arasan_sdhci_execute_tuning, .set_delay = &arasan_sdhci_set_tapdelay, @@ -236,6 +589,9 @@ static int arasan_sdhci_probe(struct udevice *dev) host->quirks |= SDHCI_QUIRK_BROKEN_HISPD_MODE; #endif + if (priv->no_1p8) + host->quirks |= SDHCI_QUIRK_NO_1_8_V; + plat->cfg.f_max = CONFIG_ZYNQ_SDHCI_MAX_FREQ; ret = mmc_of_parse(dev, &plat->cfg); @@ -267,8 +623,9 @@ static int arasan_sdhci_ofdata_to_platdata(struct udevice *dev) priv->host->name = dev->name; -#if defined(CONFIG_ARCH_ZYNQMP) +#if defined(CONFIG_ARCH_ZYNQMP) || defined(CONFIG_ARCH_VERSAL) priv->host->ops = &arasan_ops; + arasan_dt_parse_clk_phases(dev); #endif priv->host->ioaddr = (void *)dev_read_addr(dev); @@ -277,6 +634,7 @@ static int arasan_sdhci_ofdata_to_platdata(struct udevice *dev) priv->deviceid = dev_read_u32_default(dev, "xlnx,device_id", -1); priv->bank = dev_read_u32_default(dev, "xlnx,mio-bank", 0); + priv->no_1p8 = dev_read_bool(dev, "no-1-8-v"); return 0; } diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index df4cbd52cf5..3cf3b14f05b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -195,6 +195,9 @@ endif config NAND_PXA3XX bool "Support for NAND on PXA3xx and Armada 370/XP/38x" select SYS_NAND_SELF_INIT + select DM_MTD + select REGMAP + select SYSCON imply CMD_NAND help This enables the driver for the NAND flash device found on @@ -291,6 +294,22 @@ config 
NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
	  This flag prevents U-Boot from reconfiguring the NAND flash
	  controller and makes it reuse the NAND timings from the 1st stage
	  bootloader.
 
+config NAND_OCTEONTX
+	bool "Support for OcteonTX NAND controller"
+	select SYS_NAND_SELF_INIT
+	imply CMD_NAND
+	help
+	  This enables the NAND flash controller hardware found on the
+	  OcteonTX processors.
+
+config NAND_OCTEONTX_HW_ECC
+	bool "Support Hardware ECC for OcteonTX NAND controller"
+	depends on NAND_OCTEONTX
+	default y
+	help
+	  This enables the hardware BCH engine found on the OcteonTX
+	  processors to provide ECC support for the NAND flash controller.
+
 config NAND_STM32_FMC2
 	bool "Support for NAND controller on STM32MP SoCs"
 	depends on ARCH_STM32MP
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 9337f6482ed..24c51b6924a 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
 obj-$(CONFIG_NAND_MXC) += mxc_nand.o
 obj-$(CONFIG_NAND_MXS) += mxs_nand.o
 obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
+obj-$(CONFIG_NAND_OCTEONTX) += octeontx_nand.o
+obj-$(CONFIG_NAND_OCTEONTX_HW_ECC) += octeontx_bch.o
 obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
 obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
 obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
diff --git a/drivers/mtd/nand/raw/mxs_nand.c b/drivers/mtd/nand/raw/mxs_nand.c
index a7852a841ce..e6bbfac4d68 100644
--- a/drivers/mtd/nand/raw/mxs_nand.c
+++ b/drivers/mtd/nand/raw/mxs_nand.c
@@ -16,19 +16,20 @@
 #include <common.h>
 #include <cpu_func.h>
 #include <dm.h>
-#include <asm/cache.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/sizes.h>
-#include <linux/types.h>
+#include <dm/device_compat.h>
 #include <malloc.h>
-#include <linux/errno.h>
-#include <asm/io.h>
+#include <mxs_nand.h>
 #include <asm/arch/clock.h>
 #include <asm/arch/imx-regs.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/cache.h>
+#include <asm/io.h>
 #include <asm/mach-imx/regs-bch.h>
 #include <asm/mach-imx/regs-gpmi.h>
-#include <asm/arch/sys_proto.h>
-#include <mxs_nand.h>
+#include <linux/errno.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
 
 #define	MXS_NAND_DMA_DESCRIPTOR_COUNT		4
 
@@ -115,13 +116,14 @@ static uint32_t mxs_nand_aux_status_offset(void)
 	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
 }
 
-static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd,
-		unsigned int *chunk_num)
+static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
+					      struct mtd_info *mtd,
+					      unsigned int *chunk_num)
 {
 	unsigned int i, j;
 
 	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
-		dev_err(this->dev, "The size of chunk0 must equal to chunkn\n");
+		dev_err(mtd->dev, "The size of chunk0 must equal to chunkn\n");
 		return false;
 	}
 
@@ -135,7 +137,7 @@ static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct m
 		if (j < geo->ecc_chunkn_size * 8) {
 			*chunk_num = i + 1;
-			dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
+			dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
 				geo->ecc_strength, *chunk_num);
 			return true;
 		}
@@ -1118,7 +1120,7 @@ static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
 	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
 	     mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
-		dev_warn(this->dev, "use legacy bch geometry\n");
+		dev_warn(mtd->dev, "use legacy bch geometry\n");
 		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
 	}
 
diff --git a/drivers/mtd/nand/raw/octeontx_bch.c b/drivers/mtd/nand/raw/octeontx_bch.c
new file mode 100644
index 00000000000..693706257c9
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_bch.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+LIST_HEAD(octeontx_bch_devices);
+static unsigned int num_vfs = BCH_NR_VF;
+static void *bch_pf;
+static void *bch_vf;
+static void *token;
+static bool bch_pf_initialized;
+static bool bch_vf_initialized;
+
+static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
+{
+	int ret;
+
+	ret = pci_sriov_init(dev, nr_virtfn);
+	if (ret)
+		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
+		       dev->name, ret);
+	return ret;
+}
+
+void *octeontx_bch_getv(void)
+{
+	if (!bch_vf)
+		return NULL;
+	if (bch_vf_initialized && bch_pf_initialized)
+		return bch_vf;
+	else
+		return NULL;
+}
+
+void octeontx_bch_putv(void *token)
+{
+	bch_vf_initialized = !!token;
+	bch_vf = token;
+}
+
+void *octeontx_bch_getp(void)
+{
+	return token;
+}
+
+void octeontx_bch_putp(void *token)
+{
+	bch_pf = token;
+	bch_pf_initialized = !!token;
+}
+
+static int do_bch_init(struct bch_device *bch)
+{
+	return 0;
+}
+
+static void bch_reset(struct bch_device *bch)
+{
+	writeq(1, bch->reg_base + BCH_CTL);
+	mdelay(2);
+}
+
+static void bch_disable(struct bch_device *bch)
+{
+	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
+	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
+	bch_reset(bch);
+}
+
+static u64 bch_check_bist_status(struct bch_device *bch)
+{
+	return readq(bch->reg_base + BCH_BIST_RESULT);
+}
+
+static int bch_device_init(struct bch_device *bch)
+{
+	u64 bist;
+	int rc;
+
+	debug("%s: Resetting...\n", __func__);
+	/* Reset the PF when probed first */
+	bch_reset(bch);
+
+	debug("%s: Checking BIST...\n", __func__);
+	/* Check BIST status */
+	bist = bch_check_bist_status(bch);
+	if (bist) {
+		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
+		return -ENODEV;
+	}
+
+	/* Get max VQs/VFs supported by the device */
+
+	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
+	debug("%s: %d vfs\n", __func__, bch->max_vfs);
+	if (num_vfs > bch->max_vfs) {
+		dev_warn(bch->dev, "Num of VFs to enable %d is greater than max available. 
Enabling %d VFs.\n", + num_vfs, bch->max_vfs); + num_vfs = bch->max_vfs; + } + bch->vfs_enabled = bch->max_vfs; + /* Get number of VQs/VFs to be enabled */ + /* TODO: Get CLK frequency */ + /* Reset device parameters */ + + debug("%s: Doing initialization\n", __func__); + rc = do_bch_init(bch); + + return rc; +} + +static int bch_sriov_configure(struct udevice *dev, int numvfs) +{ + struct bch_device *bch = dev_get_priv(dev); + int ret = -EBUSY; + + debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__, + dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled); + if (bch->vfs_in_use) + goto exit; + + ret = 0; + + if (numvfs > 0) { + debug("%s: Enabling sriov\n", __func__); + ret = pci_enable_sriov(dev, numvfs); + if (ret == 0) { + bch->flags |= BCH_FLAG_SRIOV_ENABLED; + ret = numvfs; + bch->vfs_enabled = numvfs; + } + } + + debug("VFs enabled: %d\n", ret); +exit: + debug("%s: Returning %d\n", __func__, ret); + return ret; +} + +static int octeontx_pci_bchpf_probe(struct udevice *dev) +{ + struct bch_device *bch; + int ret; + + debug("%s(%s)\n", __func__, dev->name); + bch = dev_get_priv(dev); + if (!bch) + return -ENOMEM; + + bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM); + bch->dev = dev; + + debug("%s: base address: %p\n", __func__, bch->reg_base); + ret = bch_device_init(bch); + if (ret) { + printf("%s(%s): init returned %d\n", __func__, dev->name, ret); + return ret; + } + INIT_LIST_HEAD(&bch->list); + list_add(&bch->list, &octeontx_bch_devices); + token = (void *)dev; + + debug("%s: Configuring SRIOV\n", __func__); + bch_sriov_configure(dev, num_vfs); + debug("%s: Done.\n", __func__); + octeontx_bch_putp(bch); + + return 0; +} + +static const struct pci_device_id octeontx_bchpf_pci_id_table[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) }, + {}, +}; + +static const struct pci_device_id octeontx_bchvf_pci_id_table[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF)}, + {}, +}; + +/** + * Given a data block calculate the ecc data and fill in the response + * + * @param[in] block 8-byte aligned pointer to data block to calculate ECC + * @param block_size Size of block in bytes, must be a multiple of two. + * @param bch_level Number of errors that must be corrected. The number of + * parity bytes is equal to ((15 * bch_level) + 7) / 8. + * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64. + * @param[out] ecc 8-byte aligned pointer to where ecc data should go + * @param[in] resp pointer to where responses will be written. + * + * @return Zero on success, negative on failure. + */ +int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size, + u8 bch_level, dma_addr_t ecc, dma_addr_t resp) +{ + union bch_cmd cmd; + int rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.s.cword.ecc_gen = eg_gen; + cmd.s.cword.ecc_level = bch_level; + cmd.s.cword.size = block_size; + + cmd.s.oword.ptr = ecc; + cmd.s.iword.ptr = block; + cmd.s.rword.ptr = resp; + rc = octeontx_cmd_queue_write(QID_BCH, 1, + sizeof(cmd) / sizeof(uint64_t), cmd.u); + if (rc) + return -1; + + octeontx_bch_write_doorbell(1, vf); + + return 0; +} + +/** + * Given a data block and ecc data correct the data block + * + * @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC + * data concatenated to the end to correct + * @param block_size Size of block in bytes, must be a multiple of + * two. + * @param bch_level Number of errors that must be corrected. The + * number of parity bytes is equal to + * ((15 * bch_level) + 7) / 8. 
+ * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64. + * @param[out] block_out 8-byte aligned pointer to corrected data buffer. + * This should not be the same as block_ecc_in. + * @param[in] resp pointer to where responses will be written. + * + * @return Zero on success, negative on failure. + */ + +int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in, + u16 block_size, u8 bch_level, + dma_addr_t block_out, dma_addr_t resp) +{ + union bch_cmd cmd; + int rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.s.cword.ecc_gen = eg_correct; + cmd.s.cword.ecc_level = bch_level; + cmd.s.cword.size = block_size; + + cmd.s.oword.ptr = block_out; + cmd.s.iword.ptr = block_ecc_in; + cmd.s.rword.ptr = resp; + rc = octeontx_cmd_queue_write(QID_BCH, 1, + sizeof(cmd) / sizeof(uint64_t), cmd.u); + if (rc) + return -1; + + octeontx_bch_write_doorbell(1, vf); + return 0; +} +EXPORT_SYMBOL(octeontx_bch_decode); + +int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp, + dma_addr_t handle) +{ + ulong start = get_timer(0); + + __iormb(); /* HW is updating *resp */ + while (!resp->s.done && get_timer(start) < 10) + __iormb(); /* HW is updating *resp */ + + if (resp->s.done) + return 0; + + return -ETIMEDOUT; +} + +struct bch_q octeontx_bch_q[QID_MAX]; + +static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id, + int max_depth, int fpa_pool, + int pool_size) +{ + /* some params are for later merge with CPT or cn83xx */ + struct bch_q *q = &octeontx_bch_q[queue_id]; + unsigned long paddr; + u64 *chunk_buffer; + int chunk = max_depth + 1; + int i, size; + + if ((unsigned int)queue_id >= QID_MAX) + return -EINVAL; + if (max_depth & chunk) /* must be 2^N - 1 */ + return -EINVAL; + + size = NQS * chunk * sizeof(u64); + chunk_buffer = dma_alloc_coherent(size, &paddr); + if (!chunk_buffer) + return -ENOMEM; + + q->base_paddr = paddr; + q->dev = dev; + q->index = 0; + q->max_depth = max_depth; + q->pool_size_m1 = pool_size; + q->base_vaddr = chunk_buffer; + + for (i = 0; i < NQS; i++) { + u64 *ixp; + int inext = (i + 1) * chunk - 1; + int j = (i + 1) % NQS; + int jnext = j * chunk; + dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64); + + ixp = &chunk_buffer[inext]; + *ixp = jbase; + } + + return 0; +} + +static int octeontx_pci_bchvf_probe(struct udevice *dev) +{ + struct bch_vf *vf; + union bch_vqx_ctl ctl; + union bch_vqx_cmd_buf cbuf; + int err; + + debug("%s(%s)\n", __func__, dev->name); + vf = dev_get_priv(dev); + if (!vf) + return -ENOMEM; + + vf->dev = dev; + + /* Map PF's configuration registers */ + vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM); + debug("%s: reg base: %p\n", __func__, vf->reg_base); + + err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0, + sizeof(union bch_cmd) * QDEPTH); + if (err) { + dev_err(dev, "octeontx_cmd_queue_initialize() failed\n"); + goto release; + } + + ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0)); + + cbuf.u = 0; + cbuf.s.ldwb = 1; + cbuf.s.dfb = 1; + cbuf.s.size = QDEPTH; + writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0)); + + writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0)); + + writeq(octeontx_bch_q[QID_BCH].base_paddr, + vf->reg_base + BCH_VQX_CMD_PTR(0)); + + octeontx_bch_putv(vf); + + debug("%s: bch vf initialization complete\n", __func__); + + if (octeontx_bch_getv()) + return octeontx_pci_nand_deferred_probe(); + + return -1; + +release: + return err; +} + +static int octeontx_pci_bchpf_remove(struct udevice *dev) +{ + struct bch_device *bch = dev_get_priv(dev); + + bch_disable(bch); + return 0; +} + 
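The encode/decode helpers above only enqueue a single command and ring the doorbell; completion is reported through the response word that octeontx_bch_wait() polls. A minimal caller sketch follows (illustrative only — the wrapper name and buffer handling are assumptions, using dma_map_single() the same way the fsl_esdhc changes earlier in this series do):

static int bch_ecc_gen_example(void *block, u16 len, u8 level, void *ecc)
{
	struct bch_vf *vf = octeontx_bch_getv();	/* NULL until the VF is probed */
	union bch_resp resp;
	dma_addr_t dblock, decc, dresp;
	int rc;

	if (!vf)
		return -ENODEV;

	resp.u16 = 0;
	/* The engine takes DMA addresses for data, parity and response */
	dblock = dma_map_single(block, len, DMA_TO_DEVICE);
	decc = dma_map_single(ecc, ((15 * level) + 7) / 8, DMA_FROM_DEVICE);
	dresp = dma_map_single(&resp, sizeof(resp), DMA_BIDIRECTIONAL);

	rc = octeontx_bch_encode(vf, dblock, len, level, decc, dresp);
	if (rc)
		return rc;

	/* Polls resp.s.done for up to 10ms */
	return octeontx_bch_wait(vf, &resp, dresp);
}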
+U_BOOT_DRIVER(octeontx_pci_bchpf) = { + .name = BCHPF_DRIVER_NAME, + .id = UCLASS_MISC, + .probe = octeontx_pci_bchpf_probe, + .remove = octeontx_pci_bchpf_remove, + .priv_auto_alloc_size = sizeof(struct bch_device), + .flags = DM_FLAG_OS_PREPARE, +}; + +U_BOOT_DRIVER(octeontx_pci_bchvf) = { + .name = BCHVF_DRIVER_NAME, + .id = UCLASS_MISC, + .probe = octeontx_pci_bchvf_probe, + .priv_auto_alloc_size = sizeof(struct bch_vf), +}; + +U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table); +U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table); diff --git a/drivers/mtd/nand/raw/octeontx_bch.h b/drivers/mtd/nand/raw/octeontx_bch.h new file mode 100644 index 00000000000..3aaa52c2643 --- /dev/null +++ b/drivers/mtd/nand/raw/octeontx_bch.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef __OCTEONTX_BCH_H__ +#define __OCTEONTX_BCH_H__ + +#include "octeontx_bch_regs.h" + +/* flags to indicate the features supported */ +#define BCH_FLAG_SRIOV_ENABLED BIT(1) + +/* + * BCH Registers map for 81xx + */ + +/* PF registers */ +#define BCH_CTL 0x0ull +#define BCH_ERR_CFG 0x10ull +#define BCH_BIST_RESULT 0x80ull +#define BCH_ERR_INT 0x88ull +#define BCH_ERR_INT_W1S 0x90ull +#define BCH_ERR_INT_ENA_W1C 0xA0ull +#define BCH_ERR_INT_ENA_W1S 0xA8ull + +/* VF registers */ +#define BCH_VQX_CTL(z) 0x0ull +#define BCH_VQX_CMD_BUF(z) 0x8ull +#define BCH_VQX_CMD_PTR(z) 0x20ull +#define BCH_VQX_DOORBELL(z) 0x800ull + +#define BCHPF_DRIVER_NAME "octeontx-bchpf" +#define BCHVF_DRIVER_NAME "octeontx-bchvf" + +struct bch_device { + struct list_head list; + u8 max_vfs; + u8 vfs_enabled; + u8 vfs_in_use; + u32 flags; + void __iomem *reg_base; + struct udevice *dev; +}; + +struct bch_vf { + u16 flags; + u8 vfid; + u8 node; + u8 priority; + struct udevice *dev; + void __iomem *reg_base; +}; + +struct buf_ptr { + u8 *vptr; + dma_addr_t dma_addr; + u16 size; +}; + +void *octeontx_bch_getv(void); +void octeontx_bch_putv(void *token); +void *octeontx_bch_getp(void); +void octeontx_bch_putp(void *token); +int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp, + dma_addr_t handle); +/** + * Given a data block calculate the ecc data and fill in the response + * + * @param[in] block 8-byte aligned pointer to data block to calculate ECC + * @param block_size Size of block in bytes, must be a multiple of two. + * @param bch_level Number of errors that must be corrected. The number of + * parity bytes is equal to ((15 * bch_level) + 7) / 8. + * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64. + * @param[out] ecc 8-byte aligned pointer to where ecc data should go + * @param[in] resp pointer to where responses will be written. + * + * @return Zero on success, negative on failure. + */ +int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size, + u8 bch_level, dma_addr_t ecc, dma_addr_t resp); + +/** + * Given a data block and ecc data correct the data block + * + * @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC + * data concatenated to the end to correct + * @param block_size Size of block in bytes, must be a multiple of + * two. + * @param bch_level Number of errors that must be corrected. The + * number of parity bytes is equal to + * ((15 * bch_level) + 7) / 8. + * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64. + * @param[out] block_out 8-byte aligned pointer to corrected data buffer. + * This should not be the same as block_ecc_in. 
+ * @param[in] resp pointer to where responses will be written. + * + * @return Zero on success, negative on failure. + */ + +int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in, + u16 block_size, u8 bch_level, + dma_addr_t block_out, dma_addr_t resp); + +/** + * Ring the BCH doorbell telling it that new commands are + * available. + * + * @param num_commands Number of new commands + * @param vf virtual function handle + */ +static inline void octeontx_bch_write_doorbell(u64 num_commands, + struct bch_vf *vf) +{ + u64 num_words = num_commands * sizeof(union bch_cmd) / sizeof(uint64_t); + + writeq(num_words, vf->reg_base + BCH_VQX_DOORBELL(0)); +} + +/** + * Since it's possible (and even likely) that the NAND device will be probed + * before the BCH device has been probed, we may need to defer the probing. + * + * In this case, the initial probe returns success but the actual probing + * is deferred until the BCH VF has been probed. + * + * @return 0 for success, otherwise error + */ +int octeontx_pci_nand_deferred_probe(void); + +#endif /* __OCTEONTX_BCH_H__ */ diff --git a/drivers/mtd/nand/raw/octeontx_bch_regs.h b/drivers/mtd/nand/raw/octeontx_bch_regs.h new file mode 100644 index 00000000000..7d34438fec0 --- /dev/null +++ b/drivers/mtd/nand/raw/octeontx_bch_regs.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef __OCTEONTX_BCH_REGS_H__ +#define __OCTEONTX_BCH_REGS_H__ + +#define BCH_NR_VF 1 + +union bch_cmd { + u64 u[4]; + struct fields { + struct { + u64 size:12; + u64 reserved_12_31:20; + u64 ecc_level:4; + u64 reserved_36_61:26; + u64 ecc_gen:2; + } cword; + struct { + u64 ptr:49; + u64 reserved_49_55:7; + u64 nc:1; + u64 fw:1; + u64 reserved_58_63:6; + } oword; + struct { + u64 ptr:49; + u64 reserved_49_55:7; + u64 nc:1; + u64 reserved_57_63:7; + } iword; + struct { + u64 ptr:49; + u64 reserved_49_63:15; + } rword; + } s; +}; + +enum ecc_gen { + eg_correct, + eg_copy, + eg_gen, + eg_copy3, +}; + +/** Response from BCH instruction */ +union bch_resp { + u16 u16; + struct { + u16 num_errors:7; /** Number of errors in block */ + u16 zero:6; /** Always zero, ignore */ + u16 erased:1; /** Block is erased */ + u16 uncorrectable:1;/** too many bits flipped */ + u16 done:1; /** Block is done */ + } s; +}; + +union bch_vqx_ctl { + u64 u; + struct { + u64 reserved_0:1; + u64 cmd_be:1; + u64 max_read:4; + u64 reserved_6_15:10; + u64 erase_disable:1; + u64 one_cmd:1; + u64 early_term:4; + u64 reserved_22_63:42; + } s; +}; + +union bch_vqx_cmd_buf { + u64 u; + struct { + u64 reserved_0_32:33; + u64 size:13; + u64 dfb:1; + u64 ldwb:1; + u64 reserved_48_63:16; + } s; +}; + +/* keep queue state indexed, even though just one supported here, + * for later generalization to similarly-shaped queues on other Cavium devices + */ +enum { + QID_BCH, + QID_MAX +}; + +struct bch_q { + struct udevice *dev; + int index; + u16 max_depth; + u16 pool_size_m1; + u64 *base_vaddr; + dma_addr_t base_paddr; +}; + +extern struct bch_q octeontx_bch_q[QID_MAX]; + +/* with one dma-mapped area, virt<->phys conversions by +/- (vaddr-paddr) */ +static inline dma_addr_t qphys(int qid, void *v) +{ + struct bch_q *q = &octeontx_bch_q[qid]; + int off = (u8 *)v - (u8 *)q->base_vaddr; + + return q->base_paddr + off; +} + +#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v)) + +static inline void *qvirt(int qid, dma_addr_t p) +{ + struct bch_q *q = &octeontx_bch_q[qid]; + int off = p - q->base_paddr; + + return q->base_vaddr + off; +} + 
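Since octeontx_cmd_queue_initialize() backs the whole queue with a single dma_alloc_coherent() region, the two helpers above are plain offset arithmetic and invert each other. A short sketch of what that guarantees (the slot index 5 is arbitrary; assumes the queue has already been initialized):

	struct bch_q *q = &octeontx_bch_q[QID_BCH];
	u64 *slot = &q->base_vaddr[5];		/* a command word in the first chunk */
	dma_addr_t pa = qphys(QID_BCH, slot);	/* address handed to the engine */

	assert(pa == q->base_paddr + 5 * sizeof(u64));
	assert(qvirt(QID_BCH, pa) == slot);	/* round trip is the identity */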
+#define octeontx_phys_to_ptr(p)   qvirt(QID_BCH, (p))
+
+/* plenty for interleaved r/w on two planes with 16k page, ecc_size 1k */
+/* QDEPTH >= 16, as successive chunks must align on 128-byte boundaries */
+#define QDEPTH	256	/* u64s in a command queue chunk, incl next-pointer */
+#define NQS	1	/* linked chunks in the chain */
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; fixed-length variants would yield
+ * higher performance.
+ *
+ * Could merge with crypto version for FPA use on cn83xx
+ */
+static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
+					   int cmd_count, const u64 *cmds)
+{
+	int ret = 0;
+	u64 *cmd_ptr;
+	struct bch_q *qptr = &octeontx_bch_q[queue_id];
+
+	if (unlikely(cmd_count < 1 || cmd_count > 32))
+		return -EINVAL;
+	if (unlikely(!cmds))
+		return -EINVAL;
+
+	cmd_ptr = qptr->base_vaddr;
+
+	while (cmd_count > 0) {
+		int slot = qptr->index % (QDEPTH * NQS);
+
+		if (slot % QDEPTH != QDEPTH - 1) {
+			cmd_ptr[slot] = *cmds++;
+			cmd_count--;
+		}
+
+		qptr->index++;
+	}
+
+	__iowmb(); /* flush commands before ringing bell */
+
+	return ret;
+}
+
+#endif /* __OCTEONTX_BCH_REGS_H__ */
diff --git a/drivers/mtd/nand/raw/octeontx_nand.c b/drivers/mtd/nand/raw/octeontx_nand.c
new file mode 100644
index 00000000000..ad219171e9c
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_nand.c
@@ -0,0 +1,2257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/device-internal.h>
+#include <dm/devres.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+/*
+ * The NDF_CMD queue accepts commands between 16 and 128 bits wide.
+ * All commands must be 16-bit aligned and are little endian.
+ * WAIT_STATUS commands must be 64-bit aligned.
+ * Commands are selected by the 4-bit opcode.
+ * + * Available Commands: + * + * 16 Bit: + * NOP + * WAIT + * BUS_ACQ, BUS_REL + * CHIP_EN, CHIP_DIS + * + * 32 Bit: + * CLE_CMD + * RD_CMD, RD_EDO_CMD + * WR_CMD + * + * 64 Bit: + * SET_TM_PAR + * + * 96 Bit: + * ALE_CMD + * + * 128 Bit: + * WAIT_STATUS, WAIT_STATUS_ALE + */ + +/* NDF Register offsets */ +#define NDF_CMD 0x0 +#define NDF_MISC 0x8 +#define NDF_ECC_CNT 0x10 +#define NDF_DRBELL 0x30 +#define NDF_ST_REG 0x38 /* status */ +#define NDF_INT 0x40 +#define NDF_INT_W1S 0x48 +#define NDF_DMA_CFG 0x50 +#define NDF_DMA_ADR 0x58 +#define NDF_INT_ENA_W1C 0x60 +#define NDF_INT_ENA_W1S 0x68 + +/* NDF command opcodes */ +#define NDF_OP_NOP 0x0 +#define NDF_OP_SET_TM_PAR 0x1 +#define NDF_OP_WAIT 0x2 +#define NDF_OP_CHIP_EN_DIS 0x3 +#define NDF_OP_CLE_CMD 0x4 +#define NDF_OP_ALE_CMD 0x5 +#define NDF_OP_WR_CMD 0x8 +#define NDF_OP_RD_CMD 0x9 +#define NDF_OP_RD_EDO_CMD 0xa +#define NDF_OP_WAIT_STATUS 0xb /* same opcode for WAIT_STATUS_ALE */ +#define NDF_OP_BUS_ACQ_REL 0xf + +#define NDF_BUS_ACQUIRE 1 +#define NDF_BUS_RELEASE 0 + +#define DBGX_EDSCR(X) (0x87A008000088 + (X) * 0x80000) + +struct ndf_nop_cmd { + u16 opcode: 4; + u16 nop: 12; +}; + +struct ndf_wait_cmd { + u16 opcode:4; + u16 r_b:1; /* wait for one cycle or PBUS_WAIT deassert */ + u16:3; + u16 wlen:3; /* timing parameter select */ + u16:5; +}; + +struct ndf_bus_cmd { + u16 opcode:4; + u16 direction:4; /* 1 = acquire, 0 = release */ + u16:8; +}; + +struct ndf_chip_cmd { + u16 opcode:4; + u16 chip:3; /* select chip, 0 = disable */ + u16 enable:1; /* 1 = enable, 0 = disable */ + u16 bus_width:2; /* 10 = 16 bit, 01 = 8 bit */ + u16:6; +}; + +struct ndf_cle_cmd { + u32 opcode:4; + u32:4; + u32 cmd_data:8; /* command sent to the PBUS AD pins */ + u32 clen1:3; /* time between PBUS CLE and WE asserts */ + u32 clen2:3; /* time WE remains asserted */ + u32 clen3:3; /* time between WE deassert and CLE */ + u32:7; +}; + +/* RD_EDO_CMD uses the same layout as RD_CMD */ +struct ndf_rd_cmd { + u32 opcode:4; + u32 data:16; /* data bytes */ + u32 rlen1:3; + u32 rlen2:3; + u32 rlen3:3; + u32 rlen4:3; +}; + +struct ndf_wr_cmd { + u32 opcode:4; + u32 data:16; /* data bytes */ + u32:4; + u32 wlen1:3; + u32 wlen2:3; + u32:3; +}; + +struct ndf_set_tm_par_cmd { + u64 opcode:4; + u64 tim_mult:4; /* multiplier for the seven parameters */ + u64 tm_par1:8; /* --> Following are the 7 timing parameters that */ + u64 tm_par2:8; /* specify the number of coprocessor cycles. */ + u64 tm_par3:8; /* A value of zero means one cycle. */ + u64 tm_par4:8; /* All values are scaled by tim_mult */ + u64 tm_par5:8; /* using tim_par * (2 ^ tim_mult). 
*/ + u64 tm_par6:8; + u64 tm_par7:8; +}; + +struct ndf_ale_cmd { + u32 opcode:4; + u32:4; + u32 adr_byte_num:4; /* number of address bytes to be sent */ + u32:4; + u32 alen1:3; + u32 alen2:3; + u32 alen3:3; + u32 alen4:3; + u32:4; + u8 adr_byt1; + u8 adr_byt2; + u8 adr_byt3; + u8 adr_byt4; + u8 adr_byt5; + u8 adr_byt6; + u8 adr_byt7; + u8 adr_byt8; +}; + +struct ndf_wait_status_cmd { + u32 opcode:4; + u32:4; + u32 data:8; /** data */ + u32 clen1:3; + u32 clen2:3; + u32 clen3:3; + u32:8; + /** set to 5 to select WAIT_STATUS_ALE command */ + u32 ale_ind:8; + /** ALE only: number of address bytes to be sent */ + u32 adr_byte_num:4; + u32:4; + u32 alen1:3; /* ALE only */ + u32 alen2:3; /* ALE only */ + u32 alen3:3; /* ALE only */ + u32 alen4:3; /* ALE only */ + u32:4; + u8 adr_byt[4]; /* ALE only */ + u32 nine:4; /* set to 9 */ + u32 and_mask:8; + u32 comp_byte:8; + u32 rlen1:3; + u32 rlen2:3; + u32 rlen3:3; + u32 rlen4:3; +}; + +union ndf_cmd { + u64 val[2]; + union { + struct ndf_nop_cmd nop; + struct ndf_wait_cmd wait; + struct ndf_bus_cmd bus_acq_rel; + struct ndf_chip_cmd chip_en_dis; + struct ndf_cle_cmd cle_cmd; + struct ndf_rd_cmd rd_cmd; + struct ndf_wr_cmd wr_cmd; + struct ndf_set_tm_par_cmd set_tm_par; + struct ndf_ale_cmd ale_cmd; + struct ndf_wait_status_cmd wait_status; + } u; +}; + +/** Disable multi-bit error hangs */ +#define NDF_MISC_MB_DIS BIT_ULL(27) +/** High watermark for NBR FIFO or load/store operations */ +#define NDF_MISC_NBR_HWM GENMASK_ULL(26, 24) +/** Wait input filter count */ +#define NDF_MISC_WAIT_CNT GENMASK_ULL(23, 18) +/** Unfilled NFD_CMD queue bytes */ +#define NDF_MISC_FR_BYTE GENMASK_ULL(17, 7) +/** Set by HW when it reads the last 8 bytes of NDF_CMD */ +#define NDF_MISC_RD_DONE BIT_ULL(6) +/** Set by HW when it reads. SW read of NDF_CMD clears it */ +#define NDF_MISC_RD_VAL BIT_ULL(5) +/** Let HW read NDF_CMD queue. 
Cleared on SW NDF_CMD write */ +#define NDF_MISC_RD_CMD BIT_ULL(4) +/** Boot disable */ +#define NDF_MISC_BT_DIS BIT_ULL(2) +/** Stop command execution after completing command queue */ +#define NDF_MISC_EX_DIS BIT_ULL(1) +/** Reset fifo */ +#define NDF_MISC_RST_FF BIT_ULL(0) + +/** DMA engine enable */ +#define NDF_DMA_CFG_EN BIT_ULL(63) +/** Read or write */ +#define NDF_DMA_CFG_RW BIT_ULL(62) +/** Terminates DMA and clears enable bit */ +#define NDF_DMA_CFG_CLR BIT_ULL(61) +/** 32-bit swap enable */ +#define NDF_DMA_CFG_SWAP32 BIT_ULL(59) +/** 16-bit swap enable */ +#define NDF_DMA_CFG_SWAP16 BIT_ULL(58) +/** 8-bit swap enable */ +#define NDF_DMA_CFG_SWAP8 BIT_ULL(57) +/** Endian mode */ +#define NDF_DMA_CFG_CMD_BE BIT_ULL(56) +/** Number of 64 bit transfers */ +#define NDF_DMA_CFG_SIZE GENMASK_ULL(55, 36) + +/** Command execution status idle */ +#define NDF_ST_REG_EXE_IDLE BIT_ULL(15) +/** Command execution SM states */ +#define NDF_ST_REG_EXE_SM GENMASK_ULL(14, 11) +/** DMA and load SM states */ +#define NDF_ST_REG_BT_SM GENMASK_ULL(10, 7) +/** Queue read-back SM bad state */ +#define NDF_ST_REG_RD_FF_BAD BIT_ULL(6) +/** Queue read-back SM states */ +#define NDF_ST_REG_RD_FF GENMASK_ULL(5, 4) +/** Main SM is in a bad state */ +#define NDF_ST_REG_MAIN_BAD BIT_ULL(3) +/** Main SM states */ +#define NDF_ST_REG_MAIN_SM GENMASK_ULL(2, 0) + +#define MAX_NAND_NAME_LEN 64 +#if (defined(NAND_MAX_PAGESIZE) && (NAND_MAX_PAGESIZE > 4096)) || \ + !defined(NAND_MAX_PAGESIZE) +# undef NAND_MAX_PAGESIZE +# define NAND_MAX_PAGESIZE 4096 +#endif +#if (defined(NAND_MAX_OOBSIZE) && (NAND_MAX_OOBSIZE > 256)) || \ + !defined(NAND_MAX_OOBSIZE) +# undef NAND_MAX_OOBSIZE +# define NAND_MAX_OOBSIZE 256 +#endif + +#define OCTEONTX_NAND_DRIVER_NAME "octeontx_nand" + +#define NDF_TIMEOUT 1000 /** Timeout in ms */ +#define USEC_PER_SEC 1000000 /** Linux compatibility */ +#ifndef NAND_MAX_CHIPS +# define NAND_MAX_CHIPS 8 /** Linux compatibility */ +#endif + +struct octeontx_nand_chip { + struct list_head node; + struct nand_chip nand; + struct ndf_set_tm_par_cmd timings; + int cs; + int selected_page; + int iface_mode; + int row_bytes; + int col_bytes; + bool oob_only; + bool iface_set; +}; + +struct octeontx_nand_buf { + u8 *dmabuf; + dma_addr_t dmaaddr; + int dmabuflen; + int data_len; + int data_index; +}; + +/** NAND flash controller (NDF) related information */ +struct octeontx_nfc { + struct nand_hw_control controller; + struct udevice *dev; + void __iomem *base; + struct list_head chips; + int selected_chip; /* Currently selected NAND chip number */ + + /* + * Status is separate from octeontx_nand_buf because + * it can be used in parallel and during init. 
+ */ + u8 *stat; + dma_addr_t stat_addr; + bool use_status; + + struct octeontx_nand_buf buf; + union bch_resp *bch_resp; + dma_addr_t bch_rhandle; + + /* BCH of all-0xff, so erased pages read as error-free */ + unsigned char *eccmask; +}; + +/* settable timings - 0..7 select timing of alen1..4/clen1..3/etc */ +enum tm_idx { + t0, /* fixed at 4<<mult cycles */ + t1, t2, t3, t4, t5, t6, t7, /* settable per ONFI-timing mode */ +}; + +struct octeontx_probe_device { + struct list_head list; + struct udevice *dev; +}; + +static struct bch_vf *bch_vf; +/** Deferred devices due to BCH not being ready */ +LIST_HEAD(octeontx_pci_nand_deferred_devices); + +/** default parameters used for probing chips */ +#define MAX_ONFI_MODE 5 + +static int default_onfi_timing; +static int slew_ns = 2; /* default timing padding */ +static int def_ecc_size = 512; /* 1024 best for sw_bch, <= 4095 for hw_bch */ +static int default_width = 1; /* 8 bit */ +static int default_page_size = 2048; +static struct ndf_set_tm_par_cmd default_timing_parms; + +/** Port from Linux */ +#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ +({ \ + ulong __start = get_timer(0); \ + void *__addr = (addr); \ + const ulong __timeout_ms = timeout_us / 1000; \ + do { \ + (val) = readq(__addr); \ + if (cond) \ + break; \ + if (timeout_us && get_timer(__start) > __timeout_ms) { \ + (val) = readq(__addr); \ + break; \ + } \ + if (delay_us) \ + udelay(delay_us); \ + } while (1); \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +/** Ported from Linux 4.9.0 include/linux/of.h for compatibility */ +static inline int of_get_child_count(const ofnode node) +{ + return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node)); +} + +/** + * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c + */ +static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section || !ecc->total) + return -ERANGE; + + oobregion->length = ecc->total; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +/** + * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c + */ +static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = mtd->oobsize - ecc->total - 2; + oobregion->offset = 2; + + return 0; +} + +static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = { + .ecc = nand_ooblayout_ecc_lp, + .rfree = nand_ooblayout_free_lp, +}; + +static inline struct octeontx_nand_chip *to_otx_nand(struct nand_chip *nand) +{ + return container_of(nand, struct octeontx_nand_chip, nand); +} + +static inline struct octeontx_nfc *to_otx_nfc(struct nand_hw_control *ctrl) +{ + return container_of(ctrl, struct octeontx_nfc, controller); +} + +static int octeontx_nand_calc_ecc_layout(struct nand_chip *nand) +{ + struct nand_ecclayout *layout = nand->ecc.layout; + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + struct mtd_info *mtd = &nand->mtd; + int oobsize = mtd->oobsize; + int i; + bool layout_alloc = false; + + if (!layout) { + layout = devm_kzalloc(tn->dev, sizeof(*layout), GFP_KERNEL); + if (!layout) + return -ENOMEM; + nand->ecc.layout = layout; + layout_alloc = true; + } + layout->eccbytes = nand->ecc.steps * nand->ecc.bytes; + /* Reserve 2 bytes for bad block marker */ + if 
(layout->eccbytes + 2 > oobsize) { + pr_err("No suitable oob scheme available for oobsize %d eccbytes %u\n", + oobsize, layout->eccbytes); + goto fail; + } + /* put ecc bytes at oob tail */ + for (i = 0; i < layout->eccbytes; i++) + layout->eccpos[i] = oobsize - layout->eccbytes + i; + layout->oobfree[0].offset = 2; + layout->oobfree[0].length = oobsize - 2 - layout->eccbytes; + nand->ecc.layout = layout; + return 0; + +fail: + if (layout_alloc) + kfree(layout); + return -1; +} + +/* + * Read a single byte from the temporary buffer. Used after READID + * to get the NAND information and for STATUS. + */ +static u8 octeontx_nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + + if (tn->use_status) { + tn->use_status = false; + return *tn->stat; + } + + if (tn->buf.data_index < tn->buf.data_len) + return tn->buf.dmabuf[tn->buf.data_index++]; + + dev_err(tn->dev, "No data to read, idx: 0x%x, len: 0x%x\n", + tn->buf.data_index, tn->buf.data_len); + + return 0xff; +} + +/* + * Read a number of pending bytes from the temporary buffer. Used + * to get page and OOB data. + */ +static void octeontx_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + + if (len > tn->buf.data_len - tn->buf.data_index) { + dev_err(tn->dev, "Not enough data for read of %d bytes\n", len); + return; + } + + memcpy(buf, tn->buf.dmabuf + tn->buf.data_index, len); + tn->buf.data_index += len; +} + +static void octeontx_nand_write_buf(struct mtd_info *mtd, + const u8 *buf, int len) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + + memcpy(tn->buf.dmabuf + tn->buf.data_len, buf, len); + tn->buf.data_len += len; +} + +/* Overwrite default function to avoid sync abort on chip = -1. 
*/ +static void octeontx_nand_select_chip(struct mtd_info *mtd, int chip) +{ +} + +static inline int timing_to_cycle(u32 psec, unsigned long clock) +{ + unsigned int ns; + int ticks; + + ns = DIV_ROUND_UP(psec, 1000); + ns += slew_ns; + + /* no rounding needed since clock is multiple of 1MHz */ + clock /= 1000000; + ns *= clock; + + ticks = DIV_ROUND_UP(ns, 1000); + + /* actual delay is (tm_parX+1)<<tim_mult */ + if (ticks) + ticks--; + + return ticks; +} + +static void set_timings(struct octeontx_nand_chip *chip, + struct ndf_set_tm_par_cmd *tp, + const struct nand_sdr_timings *timings, + unsigned long sclk) +{ + /* scaled coprocessor-cycle values */ + u32 s_wh, s_cls, s_clh, s_rp, s_wb, s_wc; + + tp->tim_mult = 0; + s_wh = timing_to_cycle(timings->tWH_min, sclk); + s_cls = timing_to_cycle(timings->tCLS_min, sclk); + s_clh = timing_to_cycle(timings->tCLH_min, sclk); + s_rp = timing_to_cycle(timings->tRP_min, sclk); + s_wb = timing_to_cycle(timings->tWB_max, sclk); + s_wc = timing_to_cycle(timings->tWC_min, sclk); + + tp->tm_par1 = s_wh; + tp->tm_par2 = s_clh; + tp->tm_par3 = s_rp + 1; + tp->tm_par4 = s_cls - s_wh; + tp->tm_par5 = s_wc - s_wh + 1; + tp->tm_par6 = s_wb; + tp->tm_par7 = 0; + tp->tim_mult++; /* overcompensate for bad math */ + + /* TODO: comment parameter re-use */ + + pr_debug("%s: tim_par: mult: %d p1: %d p2: %d p3: %d\n", + __func__, tp->tim_mult, tp->tm_par1, tp->tm_par2, tp->tm_par3); + pr_debug(" p4: %d p5: %d p6: %d p7: %d\n", + tp->tm_par4, tp->tm_par5, tp->tm_par6, tp->tm_par7); +} + +static int set_default_timings(struct octeontx_nfc *tn, + const struct nand_sdr_timings *timings) +{ + unsigned long sclk = octeontx_get_io_clock(); + + set_timings(NULL, &default_timing_parms, timings, sclk); + return 0; +} + +static int octeontx_nfc_chip_set_timings(struct octeontx_nand_chip *chip, + const struct nand_sdr_timings *timings) +{ + /*struct octeontx_nfc *tn = to_otx_nfc(chip->nand.controller);*/ + unsigned long sclk = octeontx_get_io_clock(); + + set_timings(chip, &chip->timings, timings, sclk); + return 0; +} + +/* How many bytes are free in the NFD_CMD queue? */ +static int ndf_cmd_queue_free(struct octeontx_nfc *tn) +{ + u64 ndf_misc; + + ndf_misc = readq(tn->base + NDF_MISC); + return FIELD_GET(NDF_MISC_FR_BYTE, ndf_misc); +} + +/* Submit a command to the NAND command queue. 
*/ +static int ndf_submit(struct octeontx_nfc *tn, union ndf_cmd *cmd) +{ + int opcode = cmd->val[0] & 0xf; + + switch (opcode) { + /* All these commands fit in one 64bit word */ + case NDF_OP_NOP: + case NDF_OP_SET_TM_PAR: + case NDF_OP_WAIT: + case NDF_OP_CHIP_EN_DIS: + case NDF_OP_CLE_CMD: + case NDF_OP_WR_CMD: + case NDF_OP_RD_CMD: + case NDF_OP_RD_EDO_CMD: + case NDF_OP_BUS_ACQ_REL: + if (ndf_cmd_queue_free(tn) < 8) + goto full; + writeq(cmd->val[0], tn->base + NDF_CMD); + break; + case NDF_OP_ALE_CMD: + /* ALE commands take either one or two 64bit words */ + if (cmd->u.ale_cmd.adr_byte_num < 5) { + if (ndf_cmd_queue_free(tn) < 8) + goto full; + writeq(cmd->val[0], tn->base + NDF_CMD); + } else { + if (ndf_cmd_queue_free(tn) < 16) + goto full; + writeq(cmd->val[0], tn->base + NDF_CMD); + writeq(cmd->val[1], tn->base + NDF_CMD); + } + break; + case NDF_OP_WAIT_STATUS: /* Wait status commands take two 64bit words */ + if (ndf_cmd_queue_free(tn) < 16) + goto full; + writeq(cmd->val[0], tn->base + NDF_CMD); + writeq(cmd->val[1], tn->base + NDF_CMD); + break; + default: + dev_err(tn->dev, "%s: unknown command: %u\n", __func__, opcode); + return -EINVAL; + } + return 0; + +full: + dev_err(tn->dev, "%s: no space left in command queue\n", __func__); + return -ENOMEM; +} + +/** + * Wait for the ready/busy signal. First wait for busy to be valid, + * then wait for busy to de-assert. + */ +static int ndf_build_wait_busy(struct octeontx_nfc *tn) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.wait.opcode = NDF_OP_WAIT; + cmd.u.wait.r_b = 1; + cmd.u.wait.wlen = t6; + + if (ndf_submit(tn, &cmd)) + return -ENOMEM; + return 0; +} + +static bool ndf_dma_done(struct octeontx_nfc *tn) +{ + u64 dma_cfg; + + /* Enable bit should be clear after a transfer */ + dma_cfg = readq(tn->base + NDF_DMA_CFG); + if (!(dma_cfg & NDF_DMA_CFG_EN)) + return true; + + return false; +} + +static int ndf_wait(struct octeontx_nfc *tn) +{ + ulong start = get_timer(0); + bool done; + + while (!(done = ndf_dma_done(tn)) && get_timer(start) < NDF_TIMEOUT) + ; + + if (!done) { + dev_err(tn->dev, "%s: timeout error\n", __func__); + return -ETIMEDOUT; + } + return 0; +} + +static int ndf_wait_idle(struct octeontx_nfc *tn) +{ + u64 val; + u64 dval = 0; + int rc; + int pause = 100; + u64 tot_us = USEC_PER_SEC / 10; + + rc = readq_poll_timeout(tn->base + NDF_ST_REG, + val, val & NDF_ST_REG_EXE_IDLE, pause, tot_us); + if (!rc) + rc = readq_poll_timeout(tn->base + NDF_DMA_CFG, + dval, !(dval & NDF_DMA_CFG_EN), + pause, tot_us); + + return rc; +} + +/** Issue set timing parameters */ +static int ndf_queue_cmd_timing(struct octeontx_nfc *tn, + struct ndf_set_tm_par_cmd *timings) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.set_tm_par.opcode = NDF_OP_SET_TM_PAR; + cmd.u.set_tm_par.tim_mult = timings->tim_mult; + cmd.u.set_tm_par.tm_par1 = timings->tm_par1; + cmd.u.set_tm_par.tm_par2 = timings->tm_par2; + cmd.u.set_tm_par.tm_par3 = timings->tm_par3; + cmd.u.set_tm_par.tm_par4 = timings->tm_par4; + cmd.u.set_tm_par.tm_par5 = timings->tm_par5; + cmd.u.set_tm_par.tm_par6 = timings->tm_par6; + cmd.u.set_tm_par.tm_par7 = timings->tm_par7; + return ndf_submit(tn, &cmd); +} + +/** Issue bus acquire or release */ +static int ndf_queue_cmd_bus(struct octeontx_nfc *tn, int direction) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.bus_acq_rel.opcode = NDF_OP_BUS_ACQ_REL; + cmd.u.bus_acq_rel.direction = direction; + return ndf_submit(tn, &cmd); +} + +/* Issue chip select or deselect */ +static 
int ndf_queue_cmd_chip(struct octeontx_nfc *tn, int enable, int chip, + int width) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS; + cmd.u.chip_en_dis.chip = chip; + cmd.u.chip_en_dis.enable = enable; + cmd.u.chip_en_dis.bus_width = width; + return ndf_submit(tn, &cmd); +} + +static int ndf_queue_cmd_wait(struct octeontx_nfc *tn, int t_delay) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.wait.opcode = NDF_OP_WAIT; + cmd.u.wait.wlen = t_delay; + return ndf_submit(tn, &cmd); +} + +static int ndf_queue_cmd_cle(struct octeontx_nfc *tn, int command) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.cle_cmd.opcode = NDF_OP_CLE_CMD; + cmd.u.cle_cmd.cmd_data = command; + cmd.u.cle_cmd.clen1 = t4; + cmd.u.cle_cmd.clen2 = t1; + cmd.u.cle_cmd.clen3 = t2; + return ndf_submit(tn, &cmd); +} + +static int ndf_queue_cmd_ale(struct octeontx_nfc *tn, int addr_bytes, + struct nand_chip *nand, u64 page, + u32 col, int page_size) +{ + struct octeontx_nand_chip *octeontx_nand = (nand) ? + to_otx_nand(nand) : NULL; + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.ale_cmd.opcode = NDF_OP_ALE_CMD; + cmd.u.ale_cmd.adr_byte_num = addr_bytes; + + /* set column bit for OOB area, assume OOB follows page */ + if (octeontx_nand && octeontx_nand->oob_only) + col += page_size; + + /* page is u64 for this generality, even if cmdfunc() passes int */ + switch (addr_bytes) { + /* 4-8 bytes: page, then 2-byte col */ + case 8: + cmd.u.ale_cmd.adr_byt8 = (page >> 40) & 0xff; + fallthrough; + case 7: + cmd.u.ale_cmd.adr_byt7 = (page >> 32) & 0xff; + fallthrough; + case 6: + cmd.u.ale_cmd.adr_byt6 = (page >> 24) & 0xff; + fallthrough; + case 5: + cmd.u.ale_cmd.adr_byt5 = (page >> 16) & 0xff; + fallthrough; + case 4: + cmd.u.ale_cmd.adr_byt4 = (page >> 8) & 0xff; + cmd.u.ale_cmd.adr_byt3 = page & 0xff; + cmd.u.ale_cmd.adr_byt2 = (col >> 8) & 0xff; + cmd.u.ale_cmd.adr_byt1 = col & 0xff; + break; + /* 1-3 bytes: just the page address */ + case 3: + cmd.u.ale_cmd.adr_byt3 = (page >> 16) & 0xff; + fallthrough; + case 2: + cmd.u.ale_cmd.adr_byt2 = (page >> 8) & 0xff; + fallthrough; + case 1: + cmd.u.ale_cmd.adr_byt1 = page & 0xff; + break; + default: + break; + } + + cmd.u.ale_cmd.alen1 = t3; + cmd.u.ale_cmd.alen2 = t1; + cmd.u.ale_cmd.alen3 = t5; + cmd.u.ale_cmd.alen4 = t2; + return ndf_submit(tn, &cmd); +} + +static int ndf_queue_cmd_write(struct octeontx_nfc *tn, int len) +{ + union ndf_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.u.wr_cmd.opcode = NDF_OP_WR_CMD; + cmd.u.wr_cmd.data = len; + cmd.u.wr_cmd.wlen1 = t3; + cmd.u.wr_cmd.wlen2 = t1; + return ndf_submit(tn, &cmd); +} + +static int ndf_build_pre_cmd(struct octeontx_nfc *tn, int cmd1, + int addr_bytes, u64 page, u32 col, int cmd2) +{ + struct nand_chip *nand = tn->controller.active; + struct octeontx_nand_chip *octeontx_nand; + struct ndf_set_tm_par_cmd *timings; + int width, page_size, rc; + + /* Also called before chip probing is finished */ + if (!nand) { + timings = &default_timing_parms; + page_size = default_page_size; + width = default_width; + } else { + octeontx_nand = to_otx_nand(nand); + timings = &octeontx_nand->timings; + page_size = nand->mtd.writesize; + if (nand->options & NAND_BUSWIDTH_16) + width = 2; + else + width = 1; + } + rc = ndf_queue_cmd_timing(tn, timings); + if (rc) + return rc; + + rc = ndf_queue_cmd_bus(tn, NDF_BUS_ACQUIRE); + if (rc) + return rc; + + rc = ndf_queue_cmd_chip(tn, 1, tn->selected_chip, width); + if (rc) + return rc; + + rc 
= ndf_queue_cmd_wait(tn, t1); + if (rc) + return rc; + + rc = ndf_queue_cmd_cle(tn, cmd1); + if (rc) + return rc; + + if (addr_bytes) { + rc = ndf_build_wait_busy(tn); + if (rc) + return rc; + + rc = ndf_queue_cmd_ale(tn, addr_bytes, nand, + page, col, page_size); + if (rc) + return rc; + } + + /* CLE 2 */ + if (cmd2) { + rc = ndf_build_wait_busy(tn); + if (rc) + return rc; + + rc = ndf_queue_cmd_cle(tn, cmd2); + if (rc) + return rc; + } + return 0; +} + +static int ndf_build_post_cmd(struct octeontx_nfc *tn, int hold_time) +{ + int rc; + + /* Deselect chip */ + rc = ndf_queue_cmd_chip(tn, 0, 0, 0); + if (rc) + return rc; + + rc = ndf_queue_cmd_wait(tn, t2); + if (rc) + return rc; + + /* Release bus */ + rc = ndf_queue_cmd_bus(tn, 0); + if (rc) + return rc; + + rc = ndf_queue_cmd_wait(tn, hold_time); + if (rc) + return rc; + + /* + * Last action is ringing the doorbell with number of bus + * acquire-releases cycles (currently 1). + */ + writeq(1, tn->base + NDF_DRBELL); + return 0; +} + +/* Setup the NAND DMA engine for a transfer. */ +static void ndf_setup_dma(struct octeontx_nfc *tn, int is_write, + dma_addr_t bus_addr, int len) +{ + u64 dma_cfg; + + dma_cfg = FIELD_PREP(NDF_DMA_CFG_RW, is_write) | + FIELD_PREP(NDF_DMA_CFG_SIZE, (len >> 3) - 1); + dma_cfg |= NDF_DMA_CFG_EN; + writeq(bus_addr, tn->base + NDF_DMA_ADR); + writeq(dma_cfg, tn->base + NDF_DMA_CFG); +} + +static int octeontx_nand_reset(struct octeontx_nfc *tn) +{ + int rc; + + rc = ndf_build_pre_cmd(tn, NAND_CMD_RESET, 0, 0, 0, 0); + if (rc) + return rc; + + rc = ndf_build_wait_busy(tn); + if (rc) + return rc; + + rc = ndf_build_post_cmd(tn, t2); + if (rc) + return rc; + + return 0; +} + +static int ndf_read(struct octeontx_nfc *tn, int cmd1, int addr_bytes, + u64 page, u32 col, int cmd2, int len) +{ + dma_addr_t bus_addr = tn->use_status ? tn->stat_addr : tn->buf.dmaaddr; + struct nand_chip *nand = tn->controller.active; + int timing_mode, bytes, rc; + union ndf_cmd cmd; + u64 start, end; + + pr_debug("%s(%p, 0x%x, 0x%x, 0x%llx, 0x%x, 0x%x, 0x%x)\n", __func__, + tn, cmd1, addr_bytes, page, col, cmd2, len); + if (!nand) + timing_mode = default_onfi_timing; + else + timing_mode = nand->onfi_timing_mode_default; + + /* Build the command and address cycles */ + rc = ndf_build_pre_cmd(tn, cmd1, addr_bytes, page, col, cmd2); + if (rc) { + dev_err(tn->dev, "Build pre command failed\n"); + return rc; + } + + /* This waits for some time, then waits for busy to be de-asserted. 
 */
+	rc = ndf_build_wait_busy(tn);
+	if (rc) {
+		dev_err(tn->dev, "Wait timeout\n");
+		return rc;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (timing_mode < 4)
+		cmd.u.rd_cmd.opcode = NDF_OP_RD_CMD;
+	else
+		cmd.u.rd_cmd.opcode = NDF_OP_RD_EDO_CMD;
+
+	cmd.u.rd_cmd.data = len;
+	cmd.u.rd_cmd.rlen1 = t7;
+	cmd.u.rd_cmd.rlen2 = t3;
+	cmd.u.rd_cmd.rlen3 = t1;
+	cmd.u.rd_cmd.rlen4 = t7;
+	rc = ndf_submit(tn, &cmd);
+	if (rc) {
+		dev_err(tn->dev, "Error submitting command\n");
+		return rc;
+	}
+
+	start = (u64)bus_addr;
+	ndf_setup_dma(tn, 0, bus_addr, len);
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc) {
+		dev_err(tn->dev, "Build post command failed\n");
+		return rc;
+	}
+
+	/* Wait for the DMA to complete */
+	rc = ndf_wait(tn);
+	if (rc) {
+		dev_err(tn->dev, "DMA timed out\n");
+		return rc;
+	}
+
+	end = readq(tn->base + NDF_DMA_ADR);
+	bytes = end - start;
+
+	/* Make sure NDF is really done */
+	rc = ndf_wait_idle(tn);
+	if (rc) {
+		dev_err(tn->dev, "poll idle failed\n");
+		return rc;
+	}
+
+	pr_debug("%s: Read %d bytes\n", __func__, bytes);
+	return bytes;
+}
+
+static int octeontx_nand_get_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int feature_addr,
+				      u8 *subfeature_para)
+{
+	struct nand_chip *nand = chip;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int len = 8;
+	int rc;
+
+	pr_debug("%s: feature addr: 0x%x\n", __func__, feature_addr);
+	memset(tn->buf.dmabuf, 0xff, len);
+	tn->buf.data_index = 0;
+	tn->buf.data_len = 0;
+	rc = ndf_read(tn, NAND_CMD_GET_FEATURES, 1, feature_addr, 0, 0, len);
+	if (rc < 0)
+		return rc;
+
+	memcpy(subfeature_para, tn->buf.dmabuf, ONFI_SUBFEATURE_PARAM_LEN);
+
+	return 0;
+}
+
+static int octeontx_nand_set_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int feature_addr,
+				      u8 *subfeature_para)
+{
+	struct nand_chip *nand = chip;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	const int len = ONFI_SUBFEATURE_PARAM_LEN;
+	int rc;
+
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_SET_FEATURES,
+			       1, feature_addr, 0, 0);
+	if (rc)
+		return rc;
+
+	memcpy(tn->buf.dmabuf, subfeature_para, len);
+	memset(tn->buf.dmabuf + len, 0, 8 - len);
+
+	ndf_setup_dma(tn, 1, tn->buf.dmaaddr, 8);
+
+	rc = ndf_queue_cmd_write(tn, 8);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Read a page from NAND. If the buffer has room, the out of band
+ * data will be included.
+ */
+static int ndf_page_read(struct octeontx_nfc *tn, u64 page, int col, int len)
+{
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+	debug("%s(%p, 0x%llx, 0x%x, 0x%x) active: %p\n", __func__,
+	      tn, page, col, len, tn->controller.active);
+	memset(tn->buf.dmabuf, 0xff, len);
+	return ndf_read(tn, NAND_CMD_READ0, addr_bytes,
+			page, col, NAND_CMD_READSTART, len);
+}
+
+/* Erase a NAND block */
+static int ndf_block_erase(struct octeontx_nfc *tn, u64 page_addr)
+{
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes;
+	int rc;
+
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_ERASE1, addr_bytes,
+			       page_addr, 0, NAND_CMD_ERASE2);
+	if (rc)
+		return rc;
+
+	/* Wait for R_B to signal erase is complete */
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	/* Wait until the command queue is idle */
+	return ndf_wait_idle(tn);
+}
+
+/*
+ * Write a page (or less) to NAND.
+ */
+static int ndf_page_write(struct octeontx_nfc *tn, int page)
+{
+	int len, rc;
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+	len = tn->buf.data_len - tn->buf.data_index;
+	chip->oob_only = (tn->buf.data_index >= nand->mtd.writesize);
+	WARN_ON_ONCE(len & 0x7);
+
+	ndf_setup_dma(tn, 1, tn->buf.dmaaddr + tn->buf.data_index, len);
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_SEQIN, addr_bytes, page, 0, 0);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_write(tn, len);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_cle(tn, NAND_CMD_PAGEPROG);
+	if (rc)
+		return rc;
+
+	/* Wait for R_B to signal program is complete */
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	/* Wait for the DMA to complete */
+	rc = ndf_wait(tn);
+	if (rc)
+		return rc;
+
+	/* Data transfer is done but NDF is not, it is waiting for R/B# */
+	return ndf_wait_idle(tn);
+}
+
+static void octeontx_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
+				  int column, int page_addr)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nand_chip *octeontx_nand = to_otx_nand(nand);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int rc;
+
+	tn->selected_chip = octeontx_nand->cs;
+	if (tn->selected_chip < 0 || tn->selected_chip >= NAND_MAX_CHIPS) {
+		dev_err(tn->dev, "invalid chip select\n");
+		return;
+	}
+
+	tn->use_status = false;
+
+	pr_debug("%s(%p, 0x%x, 0x%x, 0x%x) cs: %d\n", __func__, mtd, command,
+		 column, page_addr, tn->selected_chip);
+	switch (command) {
+	case NAND_CMD_READID:
+		tn->buf.data_index = 0;
+		octeontx_nand->oob_only = false;
+		rc = ndf_read(tn, command, 1, column, 0, 0, 8);
+		if (rc < 0)
+			dev_err(tn->dev, "READID failed with %d\n", rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_READOOB:
+		octeontx_nand->oob_only = true;
+		tn->buf.data_index = 0;
+		tn->buf.data_len = 0;
+		rc = ndf_page_read(tn, page_addr, column, mtd->oobsize);
+		if (rc < mtd->oobsize)
+			dev_err(tn->dev, "READOOB failed with %d\n",
+				rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_READ0:
+		octeontx_nand->oob_only = false;
+		tn->buf.data_index = 0;
+		tn->buf.data_len = 0;
+		rc = ndf_page_read(tn, page_addr, column,
+				   mtd->writesize + mtd->oobsize);
+
+		if (rc < 
mtd->writesize + mtd->oobsize) + dev_err(tn->dev, "READ0 failed with %d\n", rc); + else + tn->buf.data_len = rc; + break; + + case NAND_CMD_STATUS: + /* used in oob/not states */ + tn->use_status = true; + rc = ndf_read(tn, command, 0, 0, 0, 0, 8); + if (rc < 0) + dev_err(tn->dev, "STATUS failed with %d\n", rc); + break; + + case NAND_CMD_RESET: + /* used in oob/not states */ + rc = octeontx_nand_reset(tn); + if (rc < 0) + dev_err(tn->dev, "RESET failed with %d\n", rc); + break; + + case NAND_CMD_PARAM: + octeontx_nand->oob_only = false; + tn->buf.data_index = 0; + rc = ndf_read(tn, command, 1, 0, 0, 0, + min(tn->buf.dmabuflen, 3 * 512)); + if (rc < 0) + dev_err(tn->dev, "PARAM failed with %d\n", rc); + else + tn->buf.data_len = rc; + break; + + case NAND_CMD_RNDOUT: + tn->buf.data_index = column; + break; + + case NAND_CMD_ERASE1: + if (ndf_block_erase(tn, page_addr)) + dev_err(tn->dev, "ERASE1 failed\n"); + break; + + case NAND_CMD_ERASE2: + /* We do all erase processing in the first command, so ignore + * this one. + */ + break; + + case NAND_CMD_SEQIN: + octeontx_nand->oob_only = (column >= mtd->writesize); + tn->buf.data_index = column; + tn->buf.data_len = column; + + octeontx_nand->selected_page = page_addr; + break; + + case NAND_CMD_PAGEPROG: + rc = ndf_page_write(tn, octeontx_nand->selected_page); + if (rc) + dev_err(tn->dev, "PAGEPROG failed with %d\n", rc); + break; + + case NAND_CMD_SET_FEATURES: + octeontx_nand->oob_only = false; + /* assume tn->buf.data_len == 4 of data has been set there */ + rc = octeontx_nand_set_features(mtd, nand, + page_addr, tn->buf.dmabuf); + if (rc) + dev_err(tn->dev, "SET_FEATURES failed with %d\n", rc); + break; + + case NAND_CMD_GET_FEATURES: + octeontx_nand->oob_only = false; + rc = octeontx_nand_get_features(mtd, nand, + page_addr, tn->buf.dmabuf); + if (!rc) { + tn->buf.data_index = 0; + tn->buf.data_len = 4; + } else { + dev_err(tn->dev, "GET_FEATURES failed with %d\n", rc); + } + break; + + default: + WARN_ON_ONCE(1); + dev_err(tn->dev, "unhandled nand cmd: %x\n", command); + } +} + +static int octeontx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) +{ + struct octeontx_nfc *tn = to_otx_nfc(chip->controller); + int ret; + + ret = ndf_wait_idle(tn); + return (ret < 0) ? -EIO : 0; +} + +/* check compatibility with ONFI timing mode#N, and optionally apply */ +/* TODO: Implement chipnr support? */ +static int octeontx_nand_setup_dat_intf(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf) +{ + static const bool check_only; + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nand_chip *chip = to_otx_nand(nand); + static u64 t_wc_n[MAX_ONFI_MODE + 2]; /* cache a mode signature */ + int mode; /* deduced mode number, for reporting and restricting */ + int rc; + + /* + * Cache timing modes for reporting, and reducing needless change. + * + * Challenge: caller does not pass ONFI mode#, but reporting the mode + * and restricting to a maximum, or a list, are useful for diagnosing + * new hardware. 
So use tWC_min, distinct and monotonic across modes, + * to discover the requested/accepted mode number + */ + for (mode = MAX_ONFI_MODE; mode >= 0 && !t_wc_n[0]; mode--) { + const struct nand_sdr_timings *t; + + t = onfi_async_timing_mode_to_sdr_timings(mode); + if (!t) + continue; + t_wc_n[mode] = t->tWC_min; + } + + if (!conf) { + rc = -EINVAL; + } else if (check_only) { + rc = 0; + } else if (nand->data_interface && + chip->iface_set && chip->iface_mode == mode) { + /* + * Cases: + * - called from nand_reset, which clears DDR timing + * mode back to SDR. BUT if we're already in SDR, + * timing mode persists over resets. + * While mtd/nand layer only supports SDR, + * this is always safe. And this driver only supports SDR. + * + * - called from post-power-event nand_reset (maybe + * NFC+flash power down, or system hibernate. + * Address this when CONFIG_PM support added + */ + rc = 0; + } else { + rc = octeontx_nfc_chip_set_timings(chip, &conf->timings.sdr); + if (!rc) { + chip->iface_mode = mode; + chip->iface_set = true; + } + } + return rc; +} + +static void octeontx_bch_reset(void) +{ +} + +/* + * Given a page, calculate the ECC code + * + * chip: Pointer to NAND chip data structure + * buf: Buffer to calculate ECC on + * code: Buffer to hold ECC data + * + * Return 0 on success or -1 on failure + */ +static int octeontx_nand_bch_calculate_ecc_internal(struct mtd_info *mtd, + dma_addr_t ihandle, + u8 *code) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + int rc; + int i; + static u8 *ecc_buffer; + static int ecc_size; + static unsigned long ecc_handle; + union bch_resp *r = tn->bch_resp; + + if (!ecc_buffer || ecc_size < nand->ecc.size) { + ecc_size = nand->ecc.size; + ecc_buffer = dma_alloc_coherent(ecc_size, + (unsigned long *)&ecc_handle); + } + + memset(ecc_buffer, 0, nand->ecc.bytes); + + r->u16 = 0; + __iowmb(); /* flush done=0 before making request */ + + rc = octeontx_bch_encode(bch_vf, ihandle, nand->ecc.size, + nand->ecc.strength, + (dma_addr_t)ecc_handle, tn->bch_rhandle); + + if (!rc) { + octeontx_bch_wait(bch_vf, r, tn->bch_rhandle); + } else { + dev_err(tn->dev, "octeontx_bch_encode failed\n"); + return -1; + } + + if (!r->s.done || r->s.uncorrectable) { + dev_err(tn->dev, + "%s timeout, done:%d uncorr:%d corr:%d erased:%d\n", + __func__, r->s.done, r->s.uncorrectable, + r->s.num_errors, r->s.erased); + octeontx_bch_reset(); + return -1; + } + + memcpy(code, ecc_buffer, nand->ecc.bytes); + + for (i = 0; i < nand->ecc.bytes; i++) + code[i] ^= tn->eccmask[i]; + + return tn->bch_resp->s.num_errors; +} + +/* + * Given a page, calculate the ECC code + * + * mtd: MTD block structure + * dat: raw data (unused) + * ecc_code: buffer for ECC + */ +static int octeontx_nand_bch_calculate(struct mtd_info *mtd, + const u8 *dat, u8 *ecc_code) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + dma_addr_t handle = dma_map_single((u8 *)dat, + nand->ecc.size, DMA_TO_DEVICE); + int ret; + + ret = octeontx_nand_bch_calculate_ecc_internal(mtd, handle, + (void *)ecc_code); + + return ret; +} + +/* + * Detect and correct multi-bit ECC for a page + * + * mtd: MTD block structure + * dat: raw data read from the chip + * read_ecc: ECC from the chip (unused) + * isnull: unused + * + * Returns number of bits corrected or -1 if unrecoverable + */ +static int octeontx_nand_bch_correct(struct mtd_info *mtd, u_char *dat, + u_char *read_ecc, u_char *isnull) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = 
to_otx_nfc(nand->controller); + int i = nand->ecc.size + nand->ecc.bytes; + static u8 *data_buffer; + static dma_addr_t ihandle; + static int buffer_size; + dma_addr_t ohandle; + union bch_resp *r = tn->bch_resp; + int rc; + + if (i > buffer_size) { + if (buffer_size) + free(data_buffer); + data_buffer = dma_alloc_coherent(i, + (unsigned long *)&ihandle); + if (!data_buffer) { + dev_err(tn->dev, + "%s: Could not allocate %d bytes for buffer\n", + __func__, i); + goto error; + } + buffer_size = i; + } + + memcpy(data_buffer, dat, nand->ecc.size); + memcpy(data_buffer + nand->ecc.size, read_ecc, nand->ecc.bytes); + + for (i = 0; i < nand->ecc.bytes; i++) + data_buffer[nand->ecc.size + i] ^= tn->eccmask[i]; + + r->u16 = 0; + __iowmb(); /* flush done=0 before making request */ + + ohandle = dma_map_single(dat, nand->ecc.size, DMA_FROM_DEVICE); + rc = octeontx_bch_decode(bch_vf, ihandle, nand->ecc.size, + nand->ecc.strength, ohandle, tn->bch_rhandle); + + if (!rc) + octeontx_bch_wait(bch_vf, r, tn->bch_rhandle); + + if (rc) { + dev_err(tn->dev, "octeontx_bch_decode failed\n"); + goto error; + } + + if (!r->s.done) { + dev_err(tn->dev, "Error: BCH engine timeout\n"); + octeontx_bch_reset(); + goto error; + } + + if (r->s.erased) { + debug("Info: BCH block is erased\n"); + return 0; + } + + if (r->s.uncorrectable) { + debug("Cannot correct NAND block, response: 0x%x\n", + r->u16); + goto error; + } + + return r->s.num_errors; + +error: + debug("Error performing bch correction\n"); + return -1; +} + +void octeontx_nand_bch_hwctl(struct mtd_info *mtd, int mode) +{ + /* Do nothing. */ +} + +static int octeontx_nand_hw_bch_read_page(struct mtd_info *mtd, + struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct octeontx_nfc *tn = to_otx_nfc(nand->controller); + int i, eccsize = chip->ecc.size, ret; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + u8 *p; + u8 *ecc_code = chip->buffers->ecccode; + unsigned int max_bitflips = 0; + + /* chip->read_buf() insists on sequential order, we do OOB first */ + memcpy(chip->oob_poi, tn->buf.dmabuf + mtd->writesize, mtd->oobsize); + + /* Use private buffer as input for ECC correction */ + p = tn->buf.dmabuf; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + int stat; + + debug("Correcting block offset %lx, ecc offset %x\n", + p - buf, i); + stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); + + if (stat < 0) { + mtd->ecc_stats.failed++; + debug("Cannot correct NAND page %d\n", page); + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + /* Copy corrected data to caller's buffer now */ + memcpy(buf, tn->buf.dmabuf, mtd->writesize); + + return max_bitflips; +} + +static int octeontx_nand_hw_bch_write_page(struct mtd_info *mtd, + struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct octeontx_nfc *tn = to_otx_nfc(chip->controller); + int i, eccsize = chip->ecc.size, ret; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + const u8 *p; + u8 *ecc_calc = chip->buffers->ecccalc; + + debug("%s(buf?%p, oob%d p%x)\n", + __func__, buf, oob_required, page); + for (i = 0; i < chip->ecc.total; i++) + ecc_calc[i] = 0xFF; + + /* Copy the page data from caller's buffers to private buffer */ + chip->write_buf(mtd, buf, mtd->writesize); + /* Use private 
data as source for ECC calculation */
+	p = tn->buf.dmabuf;
+
+	/* Hardware ECC calculation */
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int ret;
+
+		ret = chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+		if (ret < 0)
+			debug("calculate(mtd, p?%p, &ecc_calc[%d]?%p) returned %d\n",
+			      p, i, &ecc_calc[i], ret);
+
+		debug("block offset %lx, ecc offset %x\n", p - buf, i);
+	}
+
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* Store resulting OOB into private buffer, will be sent to HW */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * octeontx_nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_write_page_raw(struct mtd_info *mtd,
+					struct nand_chip *chip,
+					const u8 *buf, int oob_required,
+					int page)
+{
+	chip->write_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * octeontx_nand_write_oob_std - [REPLACEABLE] the most common OOB data write
+ *                               function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int octeontx_nand_write_oob_std(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       int page)
+{
+	int status = 0;
+	const u8 *buf = chip->oob_poi;
+	int length = mtd->oobsize;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, buf, length);
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * octeontx_nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_read_page_raw(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       u8 *buf, int oob_required, int page)
+{
+	chip->read_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static int octeontx_nand_read_oob_std(struct mtd_info *mtd,
+				      struct nand_chip *chip,
+				      int page)
+
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static int octeontx_nand_calc_bch_ecc_strength(struct nand_chip *nand)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int nsteps = mtd->writesize / ecc->size;
+	int oobchunk = mtd->oobsize / nsteps;
+
+	/* ecc->strength determines ecc_level and OOB's ecc_bytes.
 */
+	const u8 strengths[] = {4, 8, 16, 24, 32, 40, 48, 56, 60, 64};
+	/* first set the desired ecc_level to match strengths[] */
+	int index = ARRAY_SIZE(strengths) - 1;
+	int need;
+
+	while (index > 0 && !(ecc->options & NAND_ECC_MAXIMIZE) &&
+	       strengths[index - 1] >= ecc->strength)
+		index--;
+
+	do {
+		need = DIV_ROUND_UP(15 * strengths[index], 8);
+		if (need <= oobchunk - 2)
+			break;
+	} while (index-- > 0);
+
+	debug("%s: steps ds: %d, strength ds: %d\n", __func__,
+	      nand->ecc_step_ds, nand->ecc_strength_ds);
+	ecc->strength = strengths[index];
+	ecc->bytes = need;
+	debug("%s: strength: %d, bytes: %d\n", __func__, ecc->strength,
+	      ecc->bytes);
+
+	if (!tn->eccmask)
+		tn->eccmask = devm_kzalloc(tn->dev, ecc->bytes, GFP_KERNEL);
+	if (!tn->eccmask)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* sample the BCH signature of an erased (all 0xff) page,
+ * to XOR into all page traffic, so erased pages have no ECC errors
+ */
+static int octeontx_bch_save_empty_eccmask(struct nand_chip *nand)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	unsigned int eccsize = nand->ecc.size;
+	unsigned int eccbytes = nand->ecc.bytes;
+	u8 erased_ecc[eccbytes];
+	unsigned long erased_handle;
+	unsigned char *erased_page = dma_alloc_coherent(eccsize,
+							&erased_handle);
+	int i;
+	int rc = 0;
+
+	if (!erased_page)
+		return -ENOMEM;
+
+	memset(erased_page, 0xff, eccsize);
+	memset(erased_ecc, 0, eccbytes);
+
+	rc = octeontx_nand_bch_calculate_ecc_internal(mtd,
+						      (dma_addr_t)erased_handle,
+						      erased_ecc);
+
+	free(erased_page);
+
+	for (i = 0; i < eccbytes; i++)
+		tn->eccmask[i] = erased_ecc[i] ^ 0xff;
+
+	return rc;
+}
+
+static void octeontx_nfc_chip_sizing(struct nand_chip *nand)
+{
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	chip->row_bytes = nand->onfi_params.addr_cycles & 0xf;
+	chip->col_bytes = nand->onfi_params.addr_cycles >> 4;
+	debug("%s(%p) row bytes: %d, col bytes: %d, ecc mode: %d\n",
+	      __func__, nand, chip->row_bytes, chip->col_bytes, ecc->mode);
+
+	/*
+	 * HW_BCH using OcteonTX BCH engine, or SOFT_BCH laid out in
+	 * HW_BCH-compatible fashion, depending on devtree advice
+	 * and kernel config.
+	 * BCH/NFC hardware capable of subpage ops, not implemented.
+	 */
+	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+	nand->options |= NAND_NO_SUBPAGE_WRITE;
+	debug("%s: start steps: %d, size: %d, bytes: %d\n",
+	      __func__, ecc->steps, ecc->size, ecc->bytes);
+	debug("%s: step ds: %d, strength ds: %d\n", __func__,
+	      nand->ecc_step_ds, nand->ecc_strength_ds);
+
+	if (ecc->mode != NAND_ECC_NONE) {
+		int nsteps = ecc->steps ? 
ecc->steps : 1; + + if (ecc->size && ecc->size != mtd->writesize) + nsteps = mtd->writesize / ecc->size; + else if (mtd->writesize > def_ecc_size && + !(mtd->writesize & (def_ecc_size - 1))) + nsteps = mtd->writesize / def_ecc_size; + ecc->steps = nsteps; + ecc->size = mtd->writesize / nsteps; + ecc->bytes = mtd->oobsize / nsteps; + + if (nand->ecc_strength_ds) + ecc->strength = nand->ecc_strength_ds; + if (nand->ecc_step_ds) + ecc->size = nand->ecc_step_ds; + /* + * no subpage ops, but set subpage-shift to match ecc->steps + * so mtd_nandbiterrs tests appropriate boundaries + */ + if (!mtd->subpage_sft && !(ecc->steps & (ecc->steps - 1))) + mtd->subpage_sft = fls(ecc->steps) - 1; + + if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) { + debug("%s: ecc mode: %d\n", __func__, ecc->mode); + if (ecc->mode != NAND_ECC_SOFT && + !octeontx_nand_calc_bch_ecc_strength(nand)) { + struct octeontx_nfc *tn = + to_otx_nfc(nand->controller); + + debug("Using hardware BCH engine support\n"); + ecc->mode = NAND_ECC_HW_SYNDROME; + ecc->read_page = octeontx_nand_hw_bch_read_page; + ecc->write_page = + octeontx_nand_hw_bch_write_page; + ecc->read_page_raw = + octeontx_nand_read_page_raw; + ecc->write_page_raw = + octeontx_nand_write_page_raw; + ecc->read_oob = octeontx_nand_read_oob_std; + ecc->write_oob = octeontx_nand_write_oob_std; + + ecc->calculate = octeontx_nand_bch_calculate; + ecc->correct = octeontx_nand_bch_correct; + ecc->hwctl = octeontx_nand_bch_hwctl; + + debug("NAND chip %d using hw_bch\n", + tn->selected_chip); + debug(" %d bytes ECC per %d byte block\n", + ecc->bytes, ecc->size); + debug(" for %d bits of correction per block.", + ecc->strength); + octeontx_nand_calc_ecc_layout(nand); + octeontx_bch_save_empty_eccmask(nand); + } + } + } +} + +static int octeontx_nfc_chip_init(struct octeontx_nfc *tn, struct udevice *dev, + ofnode node) +{ + struct octeontx_nand_chip *chip; + struct nand_chip *nand; + struct mtd_info *mtd; + int ret; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + debug("%s: Getting chip select\n", __func__); + ret = ofnode_read_s32(node, "reg", &chip->cs); + if (ret) { + dev_err(dev, "could not retrieve reg property: %d\n", ret); + return ret; + } + + if (chip->cs >= NAND_MAX_CHIPS) { + dev_err(dev, "invalid reg value: %u (max CS = 7)\n", chip->cs); + return -EINVAL; + } + debug("%s: chip select: %d\n", __func__, chip->cs); + nand = &chip->nand; + nand->controller = &tn->controller; + if (!tn->controller.active) + tn->controller.active = nand; + + debug("%s: Setting flash node\n", __func__); + nand_set_flash_node(nand, node); + + nand->options = 0; + nand->select_chip = octeontx_nand_select_chip; + nand->cmdfunc = octeontx_nand_cmdfunc; + nand->waitfunc = octeontx_nand_waitfunc; + nand->read_byte = octeontx_nand_read_byte; + nand->read_buf = octeontx_nand_read_buf; + nand->write_buf = octeontx_nand_write_buf; + nand->onfi_set_features = octeontx_nand_set_features; + nand->onfi_get_features = octeontx_nand_get_features; + nand->setup_data_interface = octeontx_nand_setup_dat_intf; + + mtd = nand_to_mtd(nand); + debug("%s: mtd: %p\n", __func__, mtd); + mtd->dev->parent = dev; + + debug("%s: NDF_MISC: 0x%llx\n", __func__, + readq(tn->base + NDF_MISC)); + + /* TODO: support more then 1 chip */ + debug("%s: Scanning identification\n", __func__); + ret = nand_scan_ident(mtd, 1, NULL); + if (ret) + return ret; + + debug("%s: Sizing chip\n", __func__); + octeontx_nfc_chip_sizing(nand); + + debug("%s: Scanning tail\n", __func__); + ret = 
nand_scan_tail(mtd); + if (ret) { + dev_err(dev, "nand_scan_tail failed: %d\n", ret); + return ret; + } + + debug("%s: Registering mtd\n", __func__); + ret = nand_register(0, mtd); + + debug("%s: Adding tail\n", __func__); + list_add_tail(&chip->node, &tn->chips); + return 0; +} + +static int octeontx_nfc_chips_init(struct octeontx_nfc *tn) +{ + struct udevice *dev = tn->dev; + ofnode node = dev->node; + ofnode nand_node; + int nr_chips = of_get_child_count(node); + int ret; + + debug("%s: node: %s\n", __func__, ofnode_get_name(node)); + debug("%s: %d chips\n", __func__, nr_chips); + if (nr_chips > NAND_MAX_CHIPS) { + dev_err(dev, "too many NAND chips: %d\n", nr_chips); + return -EINVAL; + } + + if (!nr_chips) { + debug("no DT NAND chips found\n"); + return -ENODEV; + } + + pr_info("%s: scanning %d chips DTs\n", __func__, nr_chips); + + ofnode_for_each_subnode(nand_node, node) { + debug("%s: Calling octeontx_nfc_chip_init(%p, %s, %ld)\n", + __func__, tn, dev->name, nand_node.of_offset); + ret = octeontx_nfc_chip_init(tn, dev, nand_node); + if (ret) + return ret; + } + return 0; +} + +/* Reset NFC and initialize registers. */ +static int octeontx_nfc_init(struct octeontx_nfc *tn) +{ + const struct nand_sdr_timings *timings; + u64 ndf_misc; + int rc; + + /* Initialize values and reset the fifo */ + ndf_misc = readq(tn->base + NDF_MISC); + + ndf_misc &= ~NDF_MISC_EX_DIS; + ndf_misc |= (NDF_MISC_BT_DIS | NDF_MISC_RST_FF); + writeq(ndf_misc, tn->base + NDF_MISC); + debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC)); + + /* Bring the fifo out of reset */ + ndf_misc &= ~(NDF_MISC_RST_FF); + + /* Maximum of co-processor cycles for glitch filtering */ + ndf_misc |= FIELD_PREP(NDF_MISC_WAIT_CNT, 0x3f); + + writeq(ndf_misc, tn->base + NDF_MISC); + + /* Set timing parameters to onfi mode 0 for probing */ + timings = onfi_async_timing_mode_to_sdr_timings(0); + if (IS_ERR(timings)) + return PTR_ERR(timings); + rc = set_default_timings(tn, timings); + if (rc) + return rc; + + return 0; +} + +static int octeontx_pci_nand_probe(struct udevice *dev) +{ + struct octeontx_nfc *tn = dev_get_priv(dev); + int ret; + static bool probe_done; + + debug("%s(%s) tn: %p\n", __func__, dev->name, tn); + if (probe_done) + return 0; + + if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) { + bch_vf = octeontx_bch_getv(); + if (!bch_vf) { + struct octeontx_probe_device *probe_dev; + + debug("%s: bch not yet initialized\n", __func__); + probe_dev = calloc(sizeof(*probe_dev), 1); + if (!probe_dev) { + printf("%s: Out of memory\n", __func__); + return -ENOMEM; + } + probe_dev->dev = dev; + INIT_LIST_HEAD(&probe_dev->list); + list_add_tail(&probe_dev->list, + &octeontx_pci_nand_deferred_devices); + debug("%s: Defering probe until after BCH initialization\n", + __func__); + return 0; + } + } + + tn->dev = dev; + INIT_LIST_HEAD(&tn->chips); + + tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM); + if (!tn->base) { + ret = -EINVAL; + goto release; + } + debug("%s: bar at %p\n", __func__, tn->base); + tn->buf.dmabuflen = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE; + tn->buf.dmabuf = dma_alloc_coherent(tn->buf.dmabuflen, + (unsigned long *)&tn->buf.dmaaddr); + if (!tn->buf.dmabuf) { + ret = -ENOMEM; + debug("%s: Could not allocate DMA buffer\n", __func__); + goto unclk; + } + + /* one hw-bch response, for one outstanding transaction */ + tn->bch_resp = dma_alloc_coherent(sizeof(*tn->bch_resp), + (unsigned long *)&tn->bch_rhandle); + + tn->stat = dma_alloc_coherent(8, (unsigned long *)&tn->stat_addr); + if 
(!tn->stat || !tn->bch_resp) { + debug("%s: Could not allocate bch status or response\n", + __func__); + ret = -ENOMEM; + goto unclk; + } + + debug("%s: Calling octeontx_nfc_init()\n", __func__); + octeontx_nfc_init(tn); + debug("%s: Initializing chips\n", __func__); + ret = octeontx_nfc_chips_init(tn); + debug("%s: init chips ret: %d\n", __func__, ret); + if (ret) { + if (ret != -ENODEV) + dev_err(dev, "failed to init nand chips\n"); + goto unclk; + } + dev_info(dev, "probed\n"); + return 0; + +unclk: +release: + return ret; +} + +int octeontx_pci_nand_disable(struct udevice *dev) +{ + struct octeontx_nfc *tn = dev_get_priv(dev); + u64 dma_cfg; + u64 ndf_misc; + + debug("%s: Disabling NAND device %s\n", __func__, dev->name); + dma_cfg = readq(tn->base + NDF_DMA_CFG); + dma_cfg &= ~NDF_DMA_CFG_EN; + dma_cfg |= NDF_DMA_CFG_CLR; + writeq(dma_cfg, tn->base + NDF_DMA_CFG); + + /* Disable execution and put FIFO in reset mode */ + ndf_misc = readq(tn->base + NDF_MISC); + ndf_misc |= NDF_MISC_EX_DIS | NDF_MISC_RST_FF; + writeq(ndf_misc, tn->base + NDF_MISC); + ndf_misc &= ~NDF_MISC_RST_FF; + writeq(ndf_misc, tn->base + NDF_MISC); +#ifdef DEBUG + printf("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC)); +#endif + /* Clear any interrupts and enable bits */ + writeq(~0ull, tn->base + NDF_INT_ENA_W1C); + writeq(~0ull, tn->base + NDF_INT); + debug("%s: NDF_ST_REG: 0x%llx\n", __func__, + readq(tn->base + NDF_ST_REG)); + return 0; +} + +/** + * Since it's possible (and even likely) that the NAND device will be probed + * before the BCH device has been probed, we may need to defer the probing. + * + * In this case, the initial probe returns success but the actual probing + * is deferred until the BCH VF has been probed. + * + * @return 0 for success, otherwise error + */ +int octeontx_pci_nand_deferred_probe(void) +{ + int rc = 0; + struct octeontx_probe_device *pdev; + + debug("%s: Performing deferred probing\n", __func__); + list_for_each_entry(pdev, &octeontx_pci_nand_deferred_devices, list) { + debug("%s: Probing %s\n", __func__, pdev->dev->name); + pdev->dev->flags &= ~DM_FLAG_ACTIVATED; + rc = device_probe(pdev->dev); + if (rc && rc != -ENODEV) { + printf("%s: Error %d with deferred probe of %s\n", + __func__, rc, pdev->dev->name); + break; + } + } + return rc; +} + +static const struct pci_device_id octeontx_nfc_pci_id_table[] = { + { PCI_VDEVICE(CAVIUM, 0xA04F) }, + {} +}; + +static int octeontx_nand_ofdata_to_platdata(struct udevice *dev) +{ + return 0; +} + +static const struct udevice_id octeontx_nand_ids[] = { + { .compatible = "cavium,cn8130-nand" }, + { }, +}; + +U_BOOT_DRIVER(octeontx_pci_nand) = { + .name = OCTEONTX_NAND_DRIVER_NAME, + .id = UCLASS_MTD, + .of_match = of_match_ptr(octeontx_nand_ids), + .ofdata_to_platdata = octeontx_nand_ofdata_to_platdata, + .probe = octeontx_pci_nand_probe, + .priv_auto_alloc_size = sizeof(struct octeontx_nfc), + .remove = octeontx_pci_nand_disable, + .flags = DM_FLAG_OS_PREPARE, +}; + +U_BOOT_PCI_DEVICE(octeontx_pci_nand, octeontx_nfc_pci_id_table); + +void board_nand_init(void) +{ + struct udevice *dev; + int ret; + + if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) { + ret = uclass_get_device_by_driver(UCLASS_MISC, + DM_GET_DRIVER(octeontx_pci_bchpf), + &dev); + if (ret && ret != -ENODEV) { + pr_err("Failed to initialize OcteonTX BCH PF controller. 
(error %d)\n", + ret); + } + ret = uclass_get_device_by_driver(UCLASS_MISC, + DM_GET_DRIVER(octeontx_pci_bchvf), + &dev); + if (ret && ret != -ENODEV) { + pr_err("Failed to initialize OcteonTX BCH VF controller. (error %d)\n", + ret); + } + } + + ret = uclass_get_device_by_driver(UCLASS_MTD, + DM_GET_DRIVER(octeontx_pci_nand), + &dev); + if (ret && ret != -ENODEV) + pr_err("Failed to initialize OcteonTX NAND controller. (error %d)\n", + ret); +} diff --git a/drivers/mtd/nand/raw/pxa3xx_nand.c b/drivers/mtd/nand/raw/pxa3xx_nand.c index 5fb3081c839..361a9e32935 100644 --- a/drivers/mtd/nand/raw/pxa3xx_nand.c +++ b/drivers/mtd/nand/raw/pxa3xx_nand.c @@ -22,6 +22,10 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> #include <linux/types.h> +#include <syscon.h> +#include <regmap.h> +#include <dm/uclass.h> +#include <dm/read.h> #include "pxa3xx_nand.h" @@ -117,6 +121,10 @@ DECLARE_GLOBAL_DATA_PTR; #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */ #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */ +/* System control register and bit to enable NAND on some SoCs */ +#define GENCONF_SOC_DEVICE_MUX 0x208 +#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0) + /* * This should be large enough to read 'ONFI' and 'JEDEC'. * Let's use 7 bytes, which is the maximum ID count supported @@ -157,6 +165,7 @@ enum { enum pxa3xx_nand_variant { PXA3XX_NAND_VARIANT_PXA, PXA3XX_NAND_VARIANT_ARMADA370, + PXA3XX_NAND_VARIANT_ARMADA_8K, }; struct pxa3xx_nand_host { @@ -417,10 +426,21 @@ static struct nand_ecclayout ecc_layout_8KB_bch8bit = { /* convert nano-seconds to nand flash controller clock cycles */ #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) -static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void) +static const struct udevice_id pxa3xx_nand_dt_ids[] = { + { + .compatible = "marvell,mvebu-pxa3xx-nand", + .data = PXA3XX_NAND_VARIANT_ARMADA370, + }, + { + .compatible = "marvell,armada-8k-nand-controller", + .data = PXA3XX_NAND_VARIANT_ARMADA_8K, + }, + {} +}; + +static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev) { - /* We only support the Armada 370/XP/38x for now */ - return PXA3XX_NAND_VARIANT_ARMADA370; + return dev_get_driver_data(dev); } static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, @@ -697,7 +717,8 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info) info->retcode = ERR_UNCORERR; if (status & NDSR_CORERR) { info->retcode = ERR_CORERR; - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && + if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) && info->ecc_bch) info->ecc_err_cnt = NDSR_ERR_CNT(status); else @@ -752,7 +773,8 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info) nand_writel(info, NDCB0, info->ndcb2); /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) nand_writel(info, NDCB0, info->ndcb3); } @@ -1666,7 +1688,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) } /* Device detection must be done with ECC disabled */ - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) nand_writel(info, NDECCCTRL, 0x0); if (nand_scan_ident(mtd, 1, NULL)) @@ -1716,7 +1739,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) * (aka split) command handling, */ if 
(mtd->writesize > info->chunk_size) { - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) { chip->cmdfunc = nand_cmdfunc_extended; } else { dev_err(mtd->dev, @@ -1752,19 +1776,19 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) return nand_scan_tail(mtd); } -static int alloc_nand_resource(struct pxa3xx_nand_info *info) +static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info) { struct pxa3xx_nand_platform_data *pdata; struct pxa3xx_nand_host *host; struct nand_chip *chip = NULL; struct mtd_info *mtd; - int ret, cs; + int cs; pdata = info->pdata; if (pdata->num_cs <= 0) return -ENODEV; - info->variant = pxa3xx_nand_get_variant(); + info->variant = pxa3xx_nand_get_variant(dev); for (cs = 0; cs < pdata->num_cs; cs++) { chip = (struct nand_chip *) ((u8 *)&info[1] + sizeof(*host) * cs); @@ -1794,97 +1818,87 @@ static int alloc_nand_resource(struct pxa3xx_nand_info *info) /* Allocate a buffer to allow flash detection */ info->buf_size = INIT_BUFFER_SIZE; info->data_buff = kmalloc(info->buf_size, GFP_KERNEL); - if (info->data_buff == NULL) { - ret = -ENOMEM; - goto fail_disable_clk; - } + if (info->data_buff == NULL) + return -ENOMEM; /* initialize all interrupts to be disabled */ disable_int(info, NDSR_MASK); - return 0; + /* + * Some SoCs, such as A7k/A8k, need the NAND controller to be + * enabled manually to avoid depending on the bootloader. This is + * done via a single bit in the System Functions registers. + */ + if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) { + struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle( + dev, "marvell,system-controller"); + u32 reg; - kfree(info->data_buff); -fail_disable_clk: - return ret; + if (IS_ERR(sysctrl_base)) + return PTR_ERR(sysctrl_base); + + regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg); + reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN; + regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg); + } + + return 0; } -static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info) +static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info) { struct pxa3xx_nand_platform_data *pdata; - const void *blob = gd->fdt_blob; - int node = -1; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - /* Get address decoding nodes from the FDT blob */ - do { - node = fdt_node_offset_by_compatible(blob, node, - "marvell,mvebu-pxa3xx-nand"); - if (node < 0) - break; - - /* Bypass disabeld nodes */ - if (!fdtdec_get_is_enabled(blob, node)) - continue; - - /* Get the first enabled NAND controler base address */ - info->mmio_base = - (void __iomem *)fdtdec_get_addr_size_auto_noparent( - blob, node, "reg", 0, NULL, true); + info->mmio_base = dev_read_addr_ptr(dev); - pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1); - if (pdata->num_cs != 1) { - pr_err("pxa3xx driver supports single CS only\n"); - break; - } - - if (fdtdec_get_bool(blob, node, "nand-enable-arbiter")) - pdata->enable_arbiter = 1; - - if (fdtdec_get_bool(blob, node, "nand-keep-config")) - pdata->keep_config = 1; + pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1); + if (pdata->num_cs != 1) { + pr_err("pxa3xx driver supports single CS only\n"); + return -EINVAL; + } - /* - * ECC parameters. - * If these are not set, they will be selected according - * to the detected flash type.
- */ - /* ECC strength */ - pdata->ecc_strength = fdtdec_get_int(blob, node, - "nand-ecc-strength", 0); + if (dev_read_bool(dev, "nand-enable-arbiter")) + pdata->enable_arbiter = 1; - /* ECC step size */ - pdata->ecc_step_size = fdtdec_get_int(blob, node, - "nand-ecc-step-size", 0); + if (dev_read_bool(dev, "nand-keep-config")) + pdata->keep_config = 1; - info->pdata = pdata; + /* + * ECC parameters. + * If these are not set, they will be selected according + * to the detected flash type. + */ + /* ECC strength */ + pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0); - /* Currently support only a single NAND controller */ - return 0; + /* ECC step size */ + pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size", + 0); - } while (node >= 0); + info->pdata = pdata; - return -EINVAL; + return 0; } -static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info) +static int pxa3xx_nand_probe(struct udevice *dev) { - struct mtd_info *mtd = &info->controller.active->mtd; struct pxa3xx_nand_platform_data *pdata; int ret, cs, probe_success; + struct pxa3xx_nand_info *info = dev_get_priv(dev); - ret = pxa3xx_nand_probe_dt(info); + ret = pxa3xx_nand_probe_dt(dev, info); if (ret) return ret; pdata = info->pdata; - ret = alloc_nand_resource(info); + ret = alloc_nand_resource(dev, info); if (ret) { - dev_err(mtd->dev, "alloc nand resource failed\n"); + dev_err(dev, "alloc nand resource failed\n"); return ret; } @@ -1918,22 +1932,24 @@ static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info) return 0; } -/* - * Main initialization routine - */ +U_BOOT_DRIVER(pxa3xx_nand) = { + .name = "pxa3xx-nand", + .id = UCLASS_MTD, + .of_match = pxa3xx_nand_dt_ids, + .probe = pxa3xx_nand_probe, + .priv_auto_alloc_size = sizeof(struct pxa3xx_nand_info) + + sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE, +}; + void board_nand_init(void) { - struct pxa3xx_nand_info *info; - struct pxa3xx_nand_host *host; + struct udevice *dev; int ret; - info = kzalloc(sizeof(*info) + - sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE, - GFP_KERNEL); - if (!info) - return; - - ret = pxa3xx_nand_probe(info); - if (ret) - return; + ret = uclass_get_device_by_driver(UCLASS_MTD, + DM_GET_DRIVER(pxa3xx_nand), &dev); + if (ret && ret != -ENODEV) { + pr_err("Failed to initialize %s. (error %d)\n", dev->name, + ret); + } } diff --git a/drivers/mtd/spi/sf_internal.h b/drivers/mtd/spi/sf_internal.h index dabd40a4cc1..9ceff0e7c12 100644 --- a/drivers/mtd/spi/sf_internal.h +++ b/drivers/mtd/spi/sf_internal.h @@ -67,7 +67,7 @@ struct flash_info { #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ #define USE_CLSR BIT(14) /* use CLSR command */ #define SPI_NOR_HAS_SST26LOCK BIT(15) /* Flash supports lock/unlock via BPR */ -#define SPI_NOR_OCTAL_READ BIT(16) /* Flash supports Octal Read */ +#define SPI_NOR_OCTAL_READ BIT(16) /* Flash supports Octal Read */ }; extern const struct flash_info spi_nor_ids[]; diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig new file mode 100644 index 00000000000..f15ee4f833f --- /dev/null +++ b/drivers/mux/Kconfig @@ -0,0 +1,25 @@ +menu "Multiplexer drivers" + +config MULTIPLEXER + bool "Multiplexer Support" + depends on DM + help + The mux framework is a minimalistic subsystem that handles multiplexer + controllers. It provides the same API as Linux and mux drivers should + be portable with a minimum effort. 
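For reference, a minimal sketch of the consumer side of the framework described in the help text above, assuming the mux.h header added by this series; the demo_route_signal() function, the "lane" control name and the selected state are illustrative only, while the mux_control_*() calls are the ones implemented in drivers/mux/mux-uclass.c later in this diff:

#include <common.h>
#include <dm.h>
#include <mux.h>

/* Illustrative client: look up, claim, drive and release one mux control */
static int demo_route_signal(struct udevice *dev)
{
	struct mux_control *mux;
	int ret;

	/* Resolved via "mux-controls"/"mux-control-names" in the DT node */
	ret = mux_control_get(dev, "lane", &mux);
	if (ret)
		return ret;

	/* Claim the control and drive it to state 1 (-EBUSY if taken) */
	ret = mux_control_select(mux, 1);
	if (ret)
		return ret;

	/* ... access whatever sits behind the selected path ... */

	/* Revert to the idle state, if one is set, and release the mux */
	return mux_control_deselect(mux);
}

As in Linux, select/deselect must be paired, since a selected control stays exclusively held until it is deselected.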
+ +if MULTIPLEXER + +config MUX_MMIO + bool "MMIO register bitfield-controlled Multiplexer" + depends on MULTIPLEXER && SYSCON + help + MMIO register bitfield-controlled Multiplexer controller. + + The driver builds multiplexer controllers for bitfields in a syscon + register. For N bit wide bitfields, there will be 2^N possible + multiplexer states. + +endif + +endmenu diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile new file mode 100644 index 00000000000..78ebf04c7a9 --- /dev/null +++ b/drivers/mux/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# (C) Copyright 2019 +# Jean-Jacques Hiblot <jjhiblot@ti.com> + +obj-$(CONFIG_$(SPL_)MULTIPLEXER) += mux-uclass.o +obj-$(CONFIG_$(SPL_)MUX_MMIO) += mmio.o diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c new file mode 100644 index 00000000000..b9868505a3b --- /dev/null +++ b/drivers/mux/mmio.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MMIO register bitfield-controlled multiplexer driver + * Based on the Linux mmio multiplexer driver + * + * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de> + * Copyright (C) 2019 Texas Instrument, Jean-jacques Hiblot <jjhiblot@ti.com> + */ +#include <common.h> +#include <dm.h> +#include <mux-internal.h> +#include <regmap.h> +#include <syscon.h> +#include <dm/device.h> +#include <dm/device_compat.h> +#include <dm/read.h> +#include <dm/devres.h> +#include <dt-bindings/mux/mux.h> +#include <linux/bitops.h> + +static int mux_mmio_set(struct mux_control *mux, int state) +{ + struct regmap_field **fields = dev_get_priv(mux->dev); + + return regmap_field_write(fields[mux_control_get_index(mux)], state); +} + +static const struct mux_control_ops mux_mmio_ops = { + .set = mux_mmio_set, +}; + +static const struct udevice_id mmio_mux_of_match[] = { + { .compatible = "mmio-mux" }, + { /* sentinel */ }, +}; + +static int mmio_mux_probe(struct udevice *dev) +{ + struct regmap_field **fields; + struct mux_chip *mux_chip = dev_get_uclass_priv(dev); + struct regmap *regmap; + u32 *mux_reg_masks; + u32 *idle_states; + int num_fields; + int ret; + int i; + + regmap = syscon_node_to_regmap(dev_ofnode(dev->parent)); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(dev, "failed to get regmap: %d\n", ret); + return ret; + } + + num_fields = dev_read_size(dev, "mux-reg-masks"); + if (num_fields < 0) + return log_msg_ret("mux-reg-masks missing", -EINVAL); + + num_fields /= sizeof(u32); + if (num_fields == 0 || num_fields % 2) + return -EINVAL; + num_fields = num_fields / 2; + + ret = mux_alloc_controllers(dev, num_fields); + if (ret < 0) + return log_msg_ret("mux_alloc_controllers", ret); + + fields = devm_kmalloc(dev, num_fields * sizeof(*fields), __GFP_ZERO); + if (!fields) + return -ENOMEM; + dev->priv = fields; + + mux_reg_masks = devm_kmalloc(dev, num_fields * 2 * sizeof(u32), + __GFP_ZERO); + if (!mux_reg_masks) + return -ENOMEM; + + ret = dev_read_u32_array(dev, "mux-reg-masks", mux_reg_masks, + num_fields * 2); + if (ret < 0) + return log_msg_ret("mux-reg-masks read", ret); + + idle_states = devm_kmalloc(dev, num_fields * sizeof(u32), __GFP_ZERO); + if (!idle_states) + return -ENOMEM; + + ret = dev_read_u32_array(dev, "idle-states", idle_states, num_fields); + if (ret < 0) { + log_err("idle-states"); + devm_kfree(dev, idle_states); + idle_states = NULL; + } + + for (i = 0; i < num_fields; i++) { + struct mux_control *mux = &mux_chip->mux[i]; + struct reg_field field; + u32 reg, mask; + int bits; + + reg = mux_reg_masks[2 * i]; + mask = mux_reg_masks[2
* i + 1]; + + field.reg = reg; + field.msb = fls(mask) - 1; + field.lsb = ffs(mask) - 1; + + if (mask != GENMASK(field.msb, field.lsb)) + return log_msg_ret("invalid mask", -EINVAL); + + fields[i] = devm_regmap_field_alloc(dev, regmap, field); + if (IS_ERR(fields[i])) { + ret = PTR_ERR(fields[i]); + return log_msg_ret("regmap_field_alloc", ret); + } + + bits = 1 + field.msb - field.lsb; + mux->states = 1 << bits; + + if (!idle_states) + continue; + + if (idle_states[i] != MUX_IDLE_AS_IS && + idle_states[i] >= mux->states) + return log_msg_ret("idle-states range", -EINVAL); + + mux->idle_state = idle_states[i]; + } + + devm_kfree(dev, mux_reg_masks); + if (idle_states) + devm_kfree(dev, idle_states); + + return 0; +} + +U_BOOT_DRIVER(mmio_mux) = { + .name = "mmio-mux", + .id = UCLASS_MUX, + .of_match = mmio_mux_of_match, + .probe = mmio_mux_probe, + .ops = &mux_mmio_ops, +}; diff --git a/drivers/mux/mux-uclass.c b/drivers/mux/mux-uclass.c new file mode 100644 index 00000000000..a35c3d9c948 --- /dev/null +++ b/drivers/mux/mux-uclass.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Multiplexer subsystem + * + * Based on the linux multiplexer framework + * + * Copyright (C) 2017 Axentia Technologies AB + * Author: Peter Rosin <peda@axentia.se> + * + * Copyright (C) 2017-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Jean-Jacques Hiblot <jjhiblot@ti.com> + */ + +#include <common.h> +#include <dm.h> +#include <mux-internal.h> +#include <dm/device-internal.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <dt-bindings/mux/mux.h> +#include <linux/bug.h> + +/* + * The idle-as-is "state" is not an actual state that may be selected, it + * only implies that the state should not be changed. So, use that state + * as indication that the cached state of the multiplexer is unknown. + */ +#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS + +/** + * mux_control_ops() - Get the mux_control ops. + * @dev: The client device. + * + * Return: A pointer to the 'mux_control_ops' of the device. + */ +static inline const struct mux_control_ops *mux_dev_ops(struct udevice *dev) +{ + return (const struct mux_control_ops *)dev->driver->ops; +} + +/** + * mux_control_set() - Set the state of the given mux controller. + * @mux: A multiplexer control + * @state: The new requested state. + * + * Return: 0 if OK, or a negative error code. + */ +static int mux_control_set(struct mux_control *mux, int state) +{ + int ret = mux_dev_ops(mux->dev)->set(mux, state); + + mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state; + + return ret; +} + +unsigned int mux_control_states(struct mux_control *mux) +{ + return mux->states; +} + +/** + * __mux_control_select() - Select the given multiplexer state. + * @mux: The mux-control to request a change of state from. + * @state: The new requested state. + * + * Try to set the mux to the requested state. If not, try to revert if + * appropriate. + */ +static int __mux_control_select(struct mux_control *mux, int state) +{ + int ret; + + if (WARN_ON(state < 0 || state >= mux->states)) + return -EINVAL; + + if (mux->cached_state == state) + return 0; + + ret = mux_control_set(mux, state); + if (ret >= 0) + return 0; + + /* The mux update failed, try to revert if appropriate... 
*/ + if (mux->idle_state != MUX_IDLE_AS_IS) + mux_control_set(mux, mux->idle_state); + + return ret; +} + +int mux_control_select(struct mux_control *mux, unsigned int state) +{ + int ret; + + if (mux->in_use) + return -EBUSY; + + ret = __mux_control_select(mux, state); + + if (ret < 0) + return ret; + + mux->in_use = true; + + return 0; +} + +int mux_control_deselect(struct mux_control *mux) +{ + int ret = 0; + + if (mux->idle_state != MUX_IDLE_AS_IS && + mux->idle_state != mux->cached_state) + ret = mux_control_set(mux, mux->idle_state); + + mux->in_use = false; + + return ret; +} + +static int mux_of_xlate_default(struct mux_chip *mux_chip, + struct ofnode_phandle_args *args, + struct mux_control **muxp) +{ + struct mux_control *mux; + int id; + + log_debug("%s(muxp=%p)\n", __func__, muxp); + + if (args->args_count > 1) { + debug("Invalid args_count: %d\n", args->args_count); + return -EINVAL; + } + + if (args->args_count) + id = args->args[0]; + else + id = 0; + + if (id >= mux_chip->controllers) { + pr_err("bad mux controller %u specified in %s\n", + id, ofnode_get_name(args->node)); + return -ERANGE; + } + + mux = &mux_chip->mux[id]; + mux->id = id; + *muxp = mux; + return 0; +} + +/** + * mux_get_by_indexed_prop() - Get a mux control by integer index + * @dev: The client device. + * @prop_name: Name of the device tree property. + * @index: The index of the mux to get + * @mux: A pointer to the 'mux_control' struct to initialize. + * + * Return: 0 if OK, -errno otherwise. + */ +static int mux_get_by_indexed_prop(struct udevice *dev, const char *prop_name, + int index, struct mux_control **mux) +{ + int ret; + struct ofnode_phandle_args args; + struct udevice *dev_mux; + const struct mux_control_ops *ops; + struct mux_chip *mux_chip; + + log_debug("%s(dev=%p, index=%d, mux=%p)\n", __func__, dev, index, mux); + + ret = dev_read_phandle_with_args(dev, prop_name, "#mux-control-cells", + 0, index, &args); + if (ret) { + debug("%s: dev_read_phandle_with_args failed: err=%d\n", + __func__, ret); + return ret; + } + + ret = uclass_get_device_by_ofnode(UCLASS_MUX, args.node, &dev_mux); + if (ret) { + debug("%s: uclass_get_device_by_ofnode failed: err=%d\n", + __func__, ret); + return ret; + } + + mux_chip = dev_get_uclass_priv(dev_mux); + + ops = mux_dev_ops(dev_mux); + if (ops->of_xlate) + ret = ops->of_xlate(mux_chip, &args, mux); + else + ret = mux_of_xlate_default(mux_chip, &args, mux); + if (ret) { + debug("of_xlate() failed: %d\n", ret); + return ret; + } + (*mux)->dev = dev_mux; + + return 0; +} + +int mux_get_by_index(struct udevice *dev, int index, struct mux_control **mux) +{ + return mux_get_by_indexed_prop(dev, "mux-controls", index, mux); +} + +int mux_control_get(struct udevice *dev, const char *name, + struct mux_control **mux) +{ + int index; + + debug("%s(dev=%p, name=%s, mux=%p)\n", __func__, dev, name, mux); + + index = dev_read_stringlist_search(dev, "mux-control-names", name); + if (index < 0) { + debug("dev_read_stringlist_search() failed: %d\n", index); + return index; + } + + return mux_get_by_index(dev, index, mux); +} + +void mux_control_put(struct mux_control *mux) +{ + mux_control_deselect(mux); +} + +/** + * devm_mux_control_release() - Release the given managed mux. + * @dev: The client device. + * @res: Pointer to the mux to be released. + * + * This function is called by devres to release the mux. It reverses the + * effects of mux_control_get().
+ */ +static void devm_mux_control_release(struct udevice *dev, void *res) +{ + mux_control_put(*(struct mux_control **)res); +} + +struct mux_control *devm_mux_control_get(struct udevice *dev, const char *id) +{ + int rc; + struct mux_control **mux; + + mux = devres_alloc(devm_mux_control_release, + sizeof(struct mux_control *), __GFP_ZERO); + if (unlikely(!mux)) + return ERR_PTR(-ENOMEM); + + rc = mux_control_get(dev, id, mux); + if (rc) + return ERR_PTR(rc); + + devres_add(dev, mux); + return *mux; +} + +int mux_alloc_controllers(struct udevice *dev, unsigned int controllers) +{ + int i; + struct mux_chip *mux_chip = dev_get_uclass_priv(dev); + + mux_chip->mux = devm_kmalloc(dev, + sizeof(struct mux_control) * controllers, + __GFP_ZERO); + if (!mux_chip->mux) + return -ENOMEM; + + mux_chip->controllers = controllers; + + for (i = 0; i < mux_chip->controllers; ++i) { + struct mux_control *mux = &mux_chip->mux[i]; + + mux->dev = dev; + mux->cached_state = MUX_CACHE_UNKNOWN; + mux->idle_state = MUX_IDLE_AS_IS; + mux->in_use = false; + mux->id = i; + } + + return 0; +} + +static int mux_uclass_post_probe(struct udevice *dev) +{ + int i, ret; + struct mux_chip *mux_chip = dev_get_uclass_priv(dev); + + /* Set all mux controllers to their idle state. */ + for (i = 0; i < mux_chip->controllers; ++i) { + struct mux_control *mux = &mux_chip->mux[i]; + + if (mux->idle_state == mux->cached_state) + continue; + + ret = mux_control_set(mux, mux->idle_state); + if (ret < 0) { + dev_err(dev, "unable to set idle state\n"); + return ret; + } + } + return 0; +} + +int dm_mux_init(void) +{ + struct uclass *uc; + struct udevice *dev; + int ret; + + ret = uclass_get(UCLASS_MUX, &uc); + if (ret < 0) { + log_debug("unable to get MUX uclass\n"); + return ret; + } + uclass_foreach_dev(dev, uc) { + if (dev_read_bool(dev, "u-boot,mux-autoprobe")) { + ret = device_probe(dev); + if (ret) + log_debug("unable to probe device %s\n", + dev->name); + } + } + + return 0; +} + +UCLASS_DRIVER(mux) = { + .id = UCLASS_MUX, + .name = "mux", + .post_probe = mux_uclass_post_probe, + .per_device_auto_alloc_size = sizeof(struct mux_chip), +}; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a0d2d21a556..3a5e0368805 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -407,6 +407,37 @@ config MT7628_ETH The MediaTek MT7628 ethernet interface is used on MT7628 and MT7688 based boards. +config NET_OCTEONTX + bool "OcteonTX Ethernet support" + depends on ARCH_OCTEONTX + depends on PCI_SRIOV + help + You must select Y to enable network device support for + OcteonTX SoCs. If unsure, say n + +config NET_OCTEONTX2 + bool "OcteonTX2 Ethernet support" + depends on ARCH_OCTEONTX2 + select OCTEONTX2_CGX_INTF + help + You must select Y to enable network device support for + OcteonTX2 SoCs. If unsure, say n + +config OCTEONTX_SMI + bool "OcteonTX SMI Device support" + depends on ARCH_OCTEONTX || ARCH_OCTEONTX2 + help + You must select Y to enable SMI controller support for + OcteonTX or OcteonTX2 SoCs. If unsure, say n + +config OCTEONTX2_CGX_INTF + bool "OcteonTX2 CGX ATF interface support" + depends on ARCH_OCTEONTX2 + default y if ARCH_OCTEONTX2 + help + You must select Y to enable CGX ATF interface support for + OcteonTX2 SoCs. If unsure, say n + config PCH_GBE bool "Intel Platform Controller Hub EG20T GMAC driver" depends on DM_ETH && DM_PCI @@ -701,6 +732,13 @@ config MDIO_MUX_I2CREG an I2C chip. The board it was developed for uses a mux controlled by on-board FPGA which in turn is accessed as a chip over I2C. 
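The bitfield arithmetic in mmio_mux_probe() above is compact enough to restate on its own. A minimal sketch, assuming only the fls()/ffs()/GENMASK() helpers from <linux/bitops.h>; the mask_to_states() helper is illustrative and not part of the patch:

#include <errno.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Derive the number of selectable states from one contiguous bitfield mask */
static int mask_to_states(u32 mask)
{
	int msb, lsb;

	if (!mask)
		return -EINVAL;

	msb = fls(mask) - 1;	/* index of the highest set bit */
	lsb = ffs(mask) - 1;	/* index of the lowest set bit */

	/* Non-contiguous masks are rejected, as in the probe loop above */
	if (mask != GENMASK(msb, lsb))
		return -EINVAL;

	/* An N-bit field gives 2^N states: mask 0x30 -> 2 bits -> 4 states */
	return 1 << (1 + msb - lsb);
}

So each <reg mask> pair from "mux-reg-masks" yields one controller whose state count follows directly from the mask width, which is what the Kconfig help for MUX_MMIO above describes.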
+config MDIO_IPQ4019 + bool "Qualcomm IPQ4019 MDIO interface support" + depends on DM_MDIO + help + This driver supports the MDIO interface found in Qualcomm + IPQ40xx series SoCs. + config MVMDIO bool "Marvell MDIO interface support" depends on DM_MDIO diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 03f01921ead..e3bdda359dc 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_LAN91C96) += lan91c96.o obj-$(CONFIG_LPC32XX_ETH) += lpc32xx_eth.o obj-$(CONFIG_MACB) += macb.o obj-$(CONFIG_MCFFEC) += mcffec.o mcfmii.o +obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o obj-$(CONFIG_MDIO_MUX_I2CREG) += mdio_mux_i2creg.o obj-$(CONFIG_MDIO_MUX_SANDBOX) += mdio_mux_sandbox.o obj-$(CONFIG_MPC8XX_FEC) += mpc8xx_fec.o @@ -65,6 +66,10 @@ obj-$(CONFIG_RENESAS_RAVB) += ravb.o obj-$(CONFIG_SMC91111) += smc91111.o obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o +obj-$(CONFIG_NET_OCTEONTX) += octeontx/ +obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/ +obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o +obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o obj-$(CONFIG_ULI526X) += uli526x.o obj-$(CONFIG_VSC7385_ENET) += vsc7385.o diff --git a/drivers/net/e1000.c b/drivers/net/e1000.c index 49be7667021..8e6c755f641 100644 --- a/drivers/net/e1000.c +++ b/drivers/net/e1000.c @@ -1644,6 +1644,11 @@ e1000_reset_hw(struct e1000_hw *hw) E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); + if (hw->mac_type == e1000_igb) { + E1000_WRITE_REG(hw, RXPBS, I210_RXPBSIZE_DEFAULT); + E1000_WRITE_REG(hw, TXPBS, I210_TXPBSIZE_DEFAULT); + } + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ hw->tbi_compatibility_on = false; diff --git a/drivers/net/e1000.h b/drivers/net/e1000.h index 19ed4777d9a..072851ba31c 100644 --- a/drivers/net/e1000.h +++ b/drivers/net/e1000.h @@ -735,6 +735,7 @@ struct e1000_ffvt_entry { #define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ @@ -745,6 +746,7 @@ struct e1000_ffvt_entry { #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ @@ -2589,4 +2591,8 @@ struct e1000_hw { #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + #endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ldpaa_eth/ldpaa_eth.c b/drivers/net/ldpaa_eth/ldpaa_eth.c index 67da549fdbf..42eaf49d712 100644 --- a/drivers/net/ldpaa_eth/ldpaa_eth.c +++ b/drivers/net/ldpaa_eth/ldpaa_eth.c @@ -6,20 +6,20 @@ #include <common.h> #include <cpu_func.h> +#include <dm/device_compat.h> +#include <fsl-mc/fsl_dpmac.h> +#include <fsl-mc/ldpaa_wriop.h> +#include
<hwconfig.h> #include <log.h> -#include <asm/io.h> -#include <asm/types.h> #include <malloc.h> +#include <miiphy.h> #include <net.h> -#include <hwconfig.h> #include <phy.h> -#include <miiphy.h> +#include <asm/io.h> +#include <asm/types.h> #include <linux/bug.h> #include <linux/compat.h> -#include <fsl-mc/fsl_dpmac.h> #include <linux/delay.h> - -#include <fsl-mc/ldpaa_wriop.h> #include "ldpaa_eth.h" #ifdef CONFIG_PHYLIB diff --git a/drivers/net/ldpaa_eth/lx2160a.c b/drivers/net/ldpaa_eth/lx2160a.c index 1e62c642039..e57f1a19a59 100644 --- a/drivers/net/ldpaa_eth/lx2160a.c +++ b/drivers/net/ldpaa_eth/lx2160a.c @@ -92,7 +92,7 @@ void fsl_rgmii_init(void) & FSL_CHASSIS3_EC1_REGSR_PRTCL_MASK; ec >>= FSL_CHASSIS3_EC1_REGSR_PRTCL_SHIFT; - if (!ec && (wriop_is_enabled_dpmac(17) == -ENODEV)) + if (!ec) wriop_init_dpmac_enet_if(17, PHY_INTERFACE_MODE_RGMII_ID); #endif @@ -101,7 +101,7 @@ void fsl_rgmii_init(void) & FSL_CHASSIS3_EC2_REGSR_PRTCL_MASK; ec >>= FSL_CHASSIS3_EC2_REGSR_PRTCL_SHIFT; - if (!ec && (wriop_is_enabled_dpmac(18) == -ENODEV)) + if (!ec) wriop_init_dpmac_enet_if(18, PHY_INTERFACE_MODE_RGMII_ID); #endif } diff --git a/drivers/net/mdio-ipq4019.c b/drivers/net/mdio-ipq4019.c new file mode 100644 index 00000000000..bc68e1d5065 --- /dev/null +++ b/drivers/net/mdio-ipq4019.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Qualcomm IPQ4019 MDIO driver + * + * Copyright (c) 2020 Sartura Ltd. + * + * Author: Luka Kovacic <luka.kovacic@sartura.hr> + * Author: Robert Marko <robert.marko@sartura.hr> + * + * Based on Linux driver + */ + +#include <asm/io.h> +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <miiphy.h> +#include <phy.h> + +#define MDIO_MODE_REG 0x40 +#define MDIO_ADDR_REG 0x44 +#define MDIO_DATA_WRITE_REG 0x48 +#define MDIO_DATA_READ_REG 0x4c +#define MDIO_CMD_REG 0x50 +#define MDIO_CMD_ACCESS_BUSY BIT(16) +#define MDIO_CMD_ACCESS_START BIT(8) +#define MDIO_CMD_ACCESS_CODE_READ 0 +#define MDIO_CMD_ACCESS_CODE_WRITE 1 + +/* 0 = Clause 22, 1 = Clause 45 */ +#define MDIO_MODE_BIT BIT(8) + +#define IPQ4019_MDIO_TIMEOUT 10000 +#define IPQ4019_MDIO_SLEEP 10 + +struct ipq4019_mdio_priv { + phys_addr_t mdio_base; +}; + +static int ipq4019_mdio_wait_busy(struct ipq4019_mdio_priv *priv) +{ + unsigned int busy; + + return readl_poll_sleep_timeout(priv->mdio_base + MDIO_CMD_REG, busy, + (busy & MDIO_CMD_ACCESS_BUSY) == 0, IPQ4019_MDIO_SLEEP, + IPQ4019_MDIO_TIMEOUT); +} + +int ipq4019_mdio_read(struct udevice *dev, int addr, int devad, int reg) +{ + struct ipq4019_mdio_priv *priv = dev_get_priv(dev); + unsigned int cmd; + + if (ipq4019_mdio_wait_busy(priv)) + return -ETIMEDOUT; + + /* Issue the phy address and reg */ + writel((addr << 8) | reg, priv->mdio_base + MDIO_ADDR_REG); + + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ; + + /* Issue read command */ + writel(cmd, priv->mdio_base + MDIO_CMD_REG); + + /* Wait read complete */ + if (ipq4019_mdio_wait_busy(priv)) + return -ETIMEDOUT; + + /* Read and return data */ + return readl(priv->mdio_base + MDIO_DATA_READ_REG); +} + +int ipq4019_mdio_write(struct udevice *dev, int addr, int devad, + int reg, u16 val) +{ + struct ipq4019_mdio_priv *priv = dev_get_priv(dev); + unsigned int cmd; + + if (ipq4019_mdio_wait_busy(priv)) + return -ETIMEDOUT; + + /* Issue the phy address and reg */ + writel((addr << 8) | reg, priv->mdio_base + MDIO_ADDR_REG); + + /* Issue write data */ + writel(val, priv->mdio_base + MDIO_DATA_WRITE_REG); + + cmd =
MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE; + + /* Issue write command */ + writel(cmd, priv->mdio_base + MDIO_CMD_REG); + + /* Wait for write complete */ + + if (ipq4019_mdio_wait_busy(priv)) + return -ETIMEDOUT; + + return 0; +} + +static const struct mdio_ops ipq4019_mdio_ops = { + .read = ipq4019_mdio_read, + .write = ipq4019_mdio_write, +}; + +static int ipq4019_mdio_bind(struct udevice *dev) +{ + if (ofnode_valid(dev->node)) + device_set_name(dev, ofnode_get_name(dev->node)); + + return 0; +} + +static int ipq4019_mdio_probe(struct udevice *dev) +{ + struct ipq4019_mdio_priv *priv = dev_get_priv(dev); + unsigned int data; + + priv->mdio_base = dev_read_addr(dev); + if (priv->mdio_base == FDT_ADDR_T_NONE) + return -EINVAL; + + /* Enter Clause 22 mode */ + data = readl(priv->mdio_base + MDIO_MODE_REG); + data &= ~MDIO_MODE_BIT; + writel(data, priv->mdio_base + MDIO_MODE_REG); + + return 0; +} + +static const struct udevice_id ipq4019_mdio_ids[] = { + { .compatible = "qcom,ipq4019-mdio", }, + { } +}; + +U_BOOT_DRIVER(ipq4019_mdio) = { + .name = "ipq4019_mdio", + .id = UCLASS_MDIO, + .of_match = ipq4019_mdio_ids, + .bind = ipq4019_mdio_bind, + .probe = ipq4019_mdio_probe, + .ops = &ipq4019_mdio_ops, + .priv_auto_alloc_size = sizeof(struct ipq4019_mdio_priv), +}; diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile new file mode 100644 index 00000000000..d4adb7cdba2 --- /dev/null +++ b/drivers/net/octeontx/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) 2018 Marvell International Ltd. +# + +obj-$(CONFIG_NET_OCTEONTX) += bgx.o nic_main.o nicvf_queues.o nicvf_main.o \ + xcv.o diff --git a/drivers/net/octeontx/bgx.c b/drivers/net/octeontx/bgx.c new file mode 100644 index 00000000000..fbe2e2c073e --- /dev/null +++ b/drivers/net/octeontx/bgx.c @@ -0,0 +1,1565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ */ + +#include <config.h> +#include <dm.h> +#include <errno.h> +#include <fdt_support.h> +#include <malloc.h> +#include <miiphy.h> +#include <misc.h> +#include <net.h> +#include <netdev.h> +#include <pci.h> +#include <pci_ids.h> +#include <asm/io.h> +#include <asm/arch/board.h> +#include <linux/delay.h> +#include <linux/libfdt.h> + +#include "nic_reg.h" +#include "nic.h" +#include "bgx.h" + +static const phy_interface_t if_mode[] = { + [QLM_MODE_SGMII] = PHY_INTERFACE_MODE_SGMII, + [QLM_MODE_RGMII] = PHY_INTERFACE_MODE_RGMII, + [QLM_MODE_QSGMII] = PHY_INTERFACE_MODE_QSGMII, + [QLM_MODE_XAUI] = PHY_INTERFACE_MODE_XAUI, + [QLM_MODE_RXAUI] = PHY_INTERFACE_MODE_RXAUI, +}; + +struct lmac { + struct bgx *bgx; + int dmac; + u8 mac[6]; + bool link_up; + bool init_pend; + int lmacid; /* ID within BGX */ + int phy_addr; /* ID on board */ + struct udevice *dev; + struct mii_dev *mii_bus; + struct phy_device *phydev; + unsigned int last_duplex; + unsigned int last_link; + unsigned int last_speed; + int lane_to_sds; + int use_training; + int lmac_type; + u8 qlm_mode; + int qlm; + bool is_1gx; +}; + +struct bgx { + u8 bgx_id; + int node; + struct lmac lmac[MAX_LMAC_PER_BGX]; + int lmac_count; + u8 max_lmac; + void __iomem *reg_base; + struct pci_dev *pdev; + bool is_rgx; +}; + +struct bgx_board_info bgx_board_info[MAX_BGX_PER_NODE]; + +struct bgx *bgx_vnic[MAX_BGX_PER_NODE]; + +/* APIs to read/write BGXX CSRs */ +static u64 bgx_reg_read(struct bgx *bgx, uint8_t lmac, u64 offset) +{ + u64 addr = (uintptr_t)bgx->reg_base + + ((uint32_t)lmac << 20) + offset; + + return readq((void *)addr); +} + +static void bgx_reg_write(struct bgx *bgx, uint8_t lmac, + u64 offset, u64 val) +{ + u64 addr = (uintptr_t)bgx->reg_base + + ((uint32_t)lmac << 20) + offset; + + writeq(val, (void *)addr); +} + +static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac, + u64 offset, u64 val) +{ + u64 addr = (uintptr_t)bgx->reg_base + + ((uint32_t)lmac << 20) + offset; + + writeq(val | bgx_reg_read(bgx, lmac, offset), (void *)addr); +} + +static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac, + u64 reg, u64 mask, bool zero) +{ + int timeout = 200; + u64 reg_val; + + while (timeout) { + reg_val = bgx_reg_read(bgx, lmac, reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + mdelay(1); + timeout--; + } + return 1; +} + +static int gser_poll_reg(u64 reg, int bit, u64 mask, u64 expected_val, + int timeout) +{ + u64 reg_val; + + debug("%s reg = %#llx, mask = %#llx,", __func__, reg, mask); + debug(" expected_val = %#llx, bit = %d\n", expected_val, bit); + while (timeout) { + reg_val = readq(reg) >> bit; + if ((reg_val & mask) == (expected_val)) + return 0; + mdelay(1); + timeout--; + } + return 1; +} + +static bool is_bgx_port_valid(int bgx, int lmac) +{ + debug("%s bgx %d lmac %d valid %d\n", __func__, bgx, lmac, + bgx_board_info[bgx].lmac_reg[lmac]); + + if (bgx_board_info[bgx].lmac_reg[lmac]) + return 1; + else + return 0; +} + +struct lmac *bgx_get_lmac(int node, int bgx_idx, int lmacid) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + + if (bgx) + return &bgx->lmac[lmacid]; + + return NULL; +} + +const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + + if (bgx) + return bgx->lmac[lmacid].mac; + + return NULL; +} + +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + + if (!bgx) + return; + + 
memcpy(bgx->lmac[lmacid].mac, mac, 6); +} + +/* Return number of BGX present in HW */ +void bgx_get_count(int node, int *bgx_count) +{ + int i; + struct bgx *bgx; + + *bgx_count = 0; + for (i = 0; i < MAX_BGX_PER_NODE; i++) { + bgx = bgx_vnic[node * MAX_BGX_PER_NODE + i]; + debug("bgx_vnic[%u]: %p\n", node * MAX_BGX_PER_NODE + i, + bgx); + if (bgx) + *bgx_count |= (1 << i); + } +} + +/* Return number of LMAC configured for this BGX */ +int bgx_get_lmac_count(int node, int bgx_idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + if (bgx) + return bgx->lmac_count; + + return 0; +} + +void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + u64 cfg; + + if (!bgx) + return; + + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + if (enable) + cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; + else + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); +} + +static void bgx_flush_dmac_addrs(struct bgx *bgx, u64 lmac) +{ + u64 dmac = 0x00; + u64 offset, addr; + + while (bgx->lmac[lmac].dmac > 0) { + offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(dmac)) + + (lmac * MAX_DMAC_PER_LMAC * sizeof(dmac)); + addr = (uintptr_t)bgx->reg_base + + BGX_CMR_RX_DMACX_CAM + offset; + writeq(dmac, (void *)addr); + bgx->lmac[lmac].dmac--; + } +} + +/* Configure BGX LMAC in internal loopback mode */ +void bgx_lmac_internal_loopback(int node, int bgx_idx, + int lmac_idx, bool enable) +{ + struct bgx *bgx; + struct lmac *lmac; + u64 cfg; + + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + if (!bgx) + return; + + lmac = &bgx->lmac[lmac_idx]; + if (lmac->qlm_mode == QLM_MODE_SGMII) { + cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL); + if (enable) + cfg |= PCS_MRX_CTL_LOOPBACK1; + else + cfg &= ~PCS_MRX_CTL_LOOPBACK1; + bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg); + } else { + cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1); + if (enable) + cfg |= SPU_CTL_LOOPBACK; + else + cfg &= ~SPU_CTL_LOOPBACK; + bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg); + } +} + +/* Return the DLM used for the BGX */ +static int get_qlm_for_bgx(int node, int bgx_id, int index) +{ + int qlm = 0; + u64 cfg; + + if (otx_is_soc(CN81XX)) { + qlm = (bgx_id) ? 2 : 0; + qlm += (index >= 2) ? 
1 : 0; + } else if (otx_is_soc(CN83XX)) { + switch (bgx_id) { + case 0: + qlm = 2; + break; + case 1: + qlm = 3; + break; + case 2: + if (index >= 2) + qlm = 6; + else + qlm = 5; + break; + case 3: + qlm = 4; + break; + } + } + + cfg = readq(GSERX_CFG(qlm)) & GSERX_CFG_BGX; + debug("%s:qlm%d: cfg = %lld\n", __func__, qlm, cfg); + + /* Check if DLM is configured as BGX# */ + if (cfg) { + if (readq(GSERX_PHY_CTL(qlm))) + return -1; + return qlm; + } + return -1; +} + +static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) +{ + u64 cfg; + struct lmac *lmac; + + lmac = &bgx->lmac[lmacid]; + + debug("%s:bgx_id = %d, lmacid = %d\n", __func__, bgx->bgx_id, lmacid); + + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); + + /* Disable frame alignment if using preamble */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + if (cfg & 1) + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + /* PCS reset */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, + PCS_MRX_CTL_RESET, true)) { + printf("BGX PCS reset not completed\n"); + return -1; + } + + /* power down, reset autoneg, autoneg enable */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); + cfg &= ~PCS_MRX_CTL_PWR_DN; + + if (bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) + cfg |= (PCS_MRX_CTL_RST_AN); + else + cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); + + /* Disable disparity for QSGMII mode, to prevent propagation across + * ports. + */ + + if (lmac->qlm_mode == QLM_MODE_QSGMII) { + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL); + cfg &= ~PCS_MISCX_CTL_DISP_EN; + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg); + return 0; /* Skip checking AN_CPT */ + } + + if (lmac->is_1gx) { + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL); + cfg |= PCS_MISC_CTL_MODE; + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg); + } + + if (lmac->qlm_mode == QLM_MODE_SGMII) { + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + printf("BGX AN_CPT not completed\n"); + return -1; + } + } + + return 0; +} + +static int bgx_lmac_sgmii_set_link_speed(struct lmac *lmac) +{ + u64 prtx_cfg; + u64 pcs_miscx_ctl; + u64 cfg; + struct bgx *bgx = lmac->bgx; + unsigned int lmacid = lmac->lmacid; + + debug("%s: lmacid %d\n", __func__, lmac->lmacid); + + /* Disable LMAC before setting up speed */ + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + /* Read GMX CFG */ + prtx_cfg = bgx_reg_read(bgx, lmacid, + BGX_GMP_GMI_PRTX_CFG); + /* Read PCS MISCS CTL */ + pcs_miscx_ctl = bgx_reg_read(bgx, lmacid, + BGX_GMP_PCS_MISCX_CTL); + + /* Use GMXENO to force the link down */ + if (lmac->link_up) { + pcs_miscx_ctl &= ~PCS_MISC_CTL_GMX_ENO; + /* change the duplex setting if the link is up */ + prtx_cfg |= GMI_PORT_CFG_DUPLEX; + } else { + pcs_miscx_ctl |= PCS_MISC_CTL_GMX_ENO; + } + + /* speed based setting for GMX */ + switch (lmac->last_speed) { + case 10: + prtx_cfg &= ~GMI_PORT_CFG_SPEED; + prtx_cfg |= GMI_PORT_CFG_SPEED_MSB; + prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME; + pcs_miscx_ctl |= 50; /* sampling point */ + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40); + bgx_reg_write(bgx, lmacid,
BGX_GMP_GMI_TXX_BURST, 0); + break; + case 100: + prtx_cfg &= ~GMI_PORT_CFG_SPEED; + prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB; + prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME; + pcs_miscx_ctl |= 0x5; /* sampling point */ + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40); + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0); + break; + case 1000: + prtx_cfg |= GMI_PORT_CFG_SPEED; + prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB; + prtx_cfg |= GMI_PORT_CFG_SLOT_TIME; + pcs_miscx_ctl |= 0x1; /* sampling point */ + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x200); + if (lmac->last_duplex) + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0); + else /* half duplex */ + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, + 0x2000); + break; + default: + break; + } + + /* write back the new PCS misc and GMX settings */ + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, pcs_miscx_ctl); + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG, prtx_cfg); + + /* read back GMX CFG again to check config completion */ + bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG); + + /* enable BGX back */ + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg |= CMR_EN; + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + return 0; +} + +static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) +{ + u64 cfg; + struct lmac *lmac; + + lmac = &bgx->lmac[lmacid]; + + /* Reset SPU */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + printf("BGX SPU reset not completed\n"); + return -1; + } + + /* Disable LMAC */ + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); + /* Set interleaved running disparity for RXAUI */ + if (lmac->qlm_mode != QLM_MODE_RXAUI) + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + else + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, + SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); + + /* clear all interrupts */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + + if (lmac->use_training) { + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); + /* training enable */ + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); + } + + /* Append FCS to each packet */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); + + /* Disable forward error correction */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); + cfg &= ~SPU_FEC_CTL_FEC_EN; + bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); + + /* Disable autoneg */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); + cfg = cfg & ~(SPU_AN_CTL_XNP_EN); + if (lmac->use_training) + cfg = cfg | (SPU_AN_CTL_AN_EN); + else + cfg = cfg & ~(SPU_AN_CTL_AN_EN); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); + /* Clear all KR bits, configure according to the mode */ + cfg &= ~((0xfULL << 22) | (1ULL << 12)); + if (lmac->qlm_mode == QLM_MODE_10G_KR) + cfg |= (1 << 23); + else if (lmac->qlm_mode == QLM_MODE_40G_KR4) 
+ cfg |= (1 << 24); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); + + cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); + if (lmac->use_training) + cfg |= SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; + else + cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; + bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); + cfg &= ~SPU_CTL_LOW_POWER; + bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); + cfg &= ~SMU_TX_CTL_UNI_EN; + cfg |= SMU_TX_CTL_DIC_EN; + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); + + /* take lmac_count into account */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); + + debug("xaui_init: lmacid = %d, qlm = %d, qlm_mode = %d\n", + lmacid, lmac->qlm, lmac->qlm_mode); + /* RXAUI with Marvell PHY requires some tweaking */ + if (lmac->qlm_mode == QLM_MODE_RXAUI) { + char mii_name[20]; + struct phy_info *phy; + + phy = &bgx_board_info[bgx->bgx_id].phy_info[lmacid]; + snprintf(mii_name, sizeof(mii_name), "smi%d", phy->mdio_bus); + + debug("mii_name: %s\n", mii_name); + lmac->mii_bus = miiphy_get_dev_by_name(mii_name); + lmac->phy_addr = phy->phy_addr; + rxaui_phy_xs_init(lmac->mii_bus, lmac->phy_addr); + } + + return 0; +} + +/* Get max number of lanes present in a given QLM/DLM */ +static int get_qlm_lanes(int qlm) +{ + if (otx_is_soc(CN81XX)) + return 2; + else if (otx_is_soc(CN83XX)) + return (qlm >= 5) ? 2 : 4; + else + return -1; +} + +int __rx_equalization(int qlm, int lane) +{ + int max_lanes = get_qlm_lanes(qlm); + int l; + int fail = 0; + + /* Before completing Rx equalization wait for + * GSERx_RX_EIE_DETSTS[CDRLOCK] to be set + * This ensures the rx data is valid + */ + if (lane == -1) { + if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, 0xf, + (1 << max_lanes) - 1, 100)) { + debug("ERROR: CDR Lock not detected"); + debug(" on DLM%d for %d lanes\n", qlm, max_lanes); + return -1; + } + } else { + if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, + (0xf & (1 << lane)), (1 << lane), 100)) { + debug("ERROR: DLM%d: CDR Lock not detected", qlm); + debug(" on lane %d\n", lane); + return -1; + } + } + + for (l = 0; l < max_lanes; l++) { + u64 rctl, reer; + + if (lane != -1 && lane != l) + continue; + + /* Enable software control */ + rctl = readq(GSER_BR_RXX_CTL(qlm, l)); + rctl |= GSER_BR_RXX_CTL_RXT_SWM; + writeq(rctl, GSER_BR_RXX_CTL(qlm, l)); + + /* Clear the completion flag and initiate a new request */ + reer = readq(GSER_BR_RXX_EER(qlm, l)); + reer &= ~GSER_BR_RXX_EER_RXT_ESV; + reer |= GSER_BR_RXX_EER_RXT_EER; + writeq(reer, GSER_BR_RXX_EER(qlm, l)); + } + + /* Wait for RX equalization to complete */ + for (l = 0; l < max_lanes; l++) { + u64 rctl, reer; + + if (lane != -1 && lane != l) + continue; + + gser_poll_reg(GSER_BR_RXX_EER(qlm, l), EER_RXT_ESV, 1, 1, 200); + reer = readq(GSER_BR_RXX_EER(qlm, l)); + + /* Switch back to hardware control */ + rctl = readq(GSER_BR_RXX_CTL(qlm, l)); + rctl &= ~GSER_BR_RXX_CTL_RXT_SWM; + writeq(rctl, GSER_BR_RXX_CTL(qlm, l)); + + if (reer & GSER_BR_RXX_EER_RXT_ESV) { + debug("Rx equalization completed on DLM%d", qlm); + debug(" lane %d rxt_esm = 0x%llx\n", l, (reer & 0x3fff)); + } else { + debug("Rx equalization timed out on DLM%d", qlm); + debug(" lane %d\n", l); + fail = 1; + } + } + + return (fail) ?
-1 : 0; +} + +static int bgx_xaui_check_link(struct lmac *lmac) +{ + struct bgx *bgx = lmac->bgx; + int lmacid = lmac->lmacid; + int lmac_type = lmac->lmac_type; + u64 cfg; + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + + /* check if auto negotiation is complete */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); + if (cfg & SPU_AN_CTL_AN_EN) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_STATUS); + if (!(cfg & SPU_AN_STS_AN_COMPLETE)) { + /* Restart autonegotiation */ + debug("restarting auto-neg\n"); + bgx_reg_modify(bgx, lmacid, BGX_SPUX_AN_CONTROL, + SPU_AN_CTL_AN_RESTART); + return -1; + } + } + + debug("%s link use_training %d\n", __func__, lmac->use_training); + if (lmac->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + debug("waiting for link training\n"); + /* Clear the training interrupts (W1C) */ + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + + udelay(2000); + /* Restart training */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + + /* Perform RX Equalization. Applies to non-KR interfaces for speeds + * >= 6.25Gbps. + */ + if (!lmac->use_training) { + int qlm; + bool use_dlm = 0; + + if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) && + bgx->bgx_id == 2)) + use_dlm = 1; + switch (lmac->lmac_type) { + default: + case BGX_MODE_SGMII: + case BGX_MODE_RGMII: + case BGX_MODE_XAUI: + /* Nothing to do */ + break; + case BGX_MODE_XLAUI: + if (use_dlm) { + if (__rx_equalization(lmac->qlm, -1) || + __rx_equalization(lmac->qlm + 1, -1)) { + printf("BGX%d:%d", bgx->bgx_id, lmacid); + printf(" Waiting for RX Equalization"); + printf(" on DLM%d/DLM%d\n", + lmac->qlm, lmac->qlm + 1); + return -1; + } + } else { + if (__rx_equalization(lmac->qlm, -1)) { + printf("BGX%d:%d", bgx->bgx_id, lmacid); + printf(" Waiting for RX Equalization"); + printf(" on QLM%d\n", lmac->qlm); + return -1; + } + } + break; + case BGX_MODE_RXAUI: + /* RXAUI0 uses LMAC0:QLM0/QLM2 and RXAUI1 uses + * LMAC1:QLM1/QLM3 RXAUI requires 2 lanes + * for each interface + */ + qlm = lmac->qlm; + if (__rx_equalization(qlm, 0)) { + printf("BGX%d:%d", bgx->bgx_id, lmacid); + printf(" Waiting for RX Equalization"); + printf(" on QLM%d, Lane0\n", qlm); + return -1; + } + if (__rx_equalization(qlm, 1)) { + printf("BGX%d:%d", bgx->bgx_id, lmacid); + printf(" Waiting for RX Equalization"); + printf(" on QLM%d, Lane1\n", qlm); + return -1; + } + break; + case BGX_MODE_XFI: + { + int lid; + bool altpkg = otx_is_altpkg(); + + if (bgx->bgx_id == 0 && altpkg && lmacid) + lid = 0; + else if ((lmacid >= 2) && use_dlm) + lid = lmacid - 2; + else + lid = lmacid; + + if (__rx_equalization(lmac->qlm, lid)) { + printf("BGX%d:%d", bgx->bgx_id, lid); + printf(" Waiting for RX Equalization"); + printf(" on QLM%d\n", lmac->qlm); + } + } + break; + } + } + + /* wait for PCS to come out of reset */ + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + printf("BGX SPU reset not completed\n"); + return -1; + } + + if (lmac_type == 3 || lmac_type == 4) { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, + SPU_BR_STATUS_BLK_LOCK, false)) { + printf("SPU_BR_STATUS_BLK_LOCK not completed\n"); + return -1; + } + } else { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, + SPU_BX_STATUS_RX_ALIGN, false)) { + printf("SPU_BX_STATUS_RX_ALIGN not completed\n"); + return -1; + } + } + + /* Clear rcvflt bit 
(latching high) and read it back */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + printf("Receive fault, retry training\n"); + if (lmac->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + return -1; + } + + /* Wait for MAC RX to be ready */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, + SMU_RX_CTL_STATUS, true)) { + printf("SMU RX link not okay\n"); + return -1; + } + + /* Wait for BGX RX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { + printf("SMU RX not idle\n"); + return -1; + } + + /* Wait for BGX TX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { + printf("SMU TX not idle\n"); + return -1; + } + + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + printf("Receive fault\n"); + return -1; + } + + /* Receive link is latching low. Force it high and verify it */ + if (!(bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS1) & + SPU_STATUS1_RCV_LNK)) + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK, false)) { + printf("SPU receive link down\n"); + return -1; + } + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); + cfg &= ~SPU_MISC_CTL_RX_DIS; + bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); + return 0; +} + +static int bgx_lmac_enable(struct bgx *bgx, int8_t lmacid) +{ + struct lmac *lmac; + u64 cfg; + + lmac = &bgx->lmac[lmacid]; + + debug("%s: lmac: %p, lmacid = %d\n", __func__, lmac, lmacid); + + if (lmac->qlm_mode == QLM_MODE_SGMII || + lmac->qlm_mode == QLM_MODE_RGMII || + lmac->qlm_mode == QLM_MODE_QSGMII) { + if (bgx_lmac_sgmii_init(bgx, lmacid)) { + debug("bgx_lmac_sgmii_init failed\n"); + return -1; + } + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); + } else { + if (bgx_lmac_xaui_init(bgx, lmacid, lmac->lmac_type)) + return -1; + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); + } + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, + CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); + + return 0; +} + +int bgx_poll_for_link(int node, int bgx_idx, int lmacid) +{ + int ret; + struct lmac *lmac = bgx_get_lmac(node, bgx_idx, lmacid); + char mii_name[10]; + struct phy_info *phy; + + if (!lmac) { + printf("LMAC %d/%d/%d is disabled or doesn't exist\n", + node, bgx_idx, lmacid); + return 0; + } + + debug("%s: %d, lmac: %d/%d/%d %p\n", + __FILE__, __LINE__, + node, bgx_idx, lmacid, lmac); + if (lmac->init_pend) { + ret = bgx_lmac_enable(lmac->bgx, lmacid); + if (ret < 0) { + printf("BGX%d LMAC%d lmac_enable failed\n", bgx_idx, + lmacid); + return ret; + } + lmac->init_pend = 0; + mdelay(100); + } + if (lmac->qlm_mode == QLM_MODE_SGMII || + lmac->qlm_mode == QLM_MODE_RGMII || + lmac->qlm_mode == QLM_MODE_QSGMII) { + if 
(bgx_board_info[bgx_idx].phy_info[lmacid].phy_addr == -1) { + lmac->link_up = 1; + lmac->last_speed = 1000; + lmac->last_duplex = 1; + printf("BGX%d:LMAC %u link up\n", bgx_idx, lmacid); + return lmac->link_up; + } + snprintf(mii_name, sizeof(mii_name), "smi%d", + bgx_board_info[bgx_idx].phy_info[lmacid].mdio_bus); + + debug("mii_name: %s\n", mii_name); + + lmac->mii_bus = miiphy_get_dev_by_name(mii_name); + phy = &bgx_board_info[bgx_idx].phy_info[lmacid]; + lmac->phy_addr = phy->phy_addr; + + debug("lmac->mii_bus: %p\n", lmac->mii_bus); + if (!lmac->mii_bus) { + printf("MDIO device %s not found\n", mii_name); + ret = -ENODEV; + return ret; + } + + lmac->phydev = phy_connect(lmac->mii_bus, lmac->phy_addr, + lmac->dev, + if_mode[lmac->qlm_mode]); + + if (!lmac->phydev) { + printf("%s: No PHY device\n", __func__); + return -1; + } + + ret = phy_config(lmac->phydev); + if (ret) { + printf("%s: Could not initialize PHY %s\n", + __func__, lmac->phydev->dev->name); + return ret; + } + + ret = phy_startup(lmac->phydev); + debug("%s: %d\n", __FILE__, __LINE__); + if (ret) { + printf("%s: Could not initialize PHY %s\n", + __func__, lmac->phydev->dev->name); + } + +#ifdef OCTEONTX_XCV + if (lmac->qlm_mode == QLM_MODE_RGMII) + xcv_setup_link(lmac->phydev->link, lmac->phydev->speed); +#endif + + lmac->link_up = lmac->phydev->link; + lmac->last_speed = lmac->phydev->speed; + lmac->last_duplex = lmac->phydev->duplex; + + debug("%s qlm_mode %d phy link status 0x%x,last speed 0x%x,", + __func__, lmac->qlm_mode, lmac->link_up, + lmac->last_speed); + debug(" duplex 0x%x\n", lmac->last_duplex); + + if (lmac->qlm_mode != QLM_MODE_RGMII) + bgx_lmac_sgmii_set_link_speed(lmac); + + } else { + u64 status1; + u64 tx_ctl; + u64 rx_ctl; + + status1 = bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_SPUX_STATUS1); + tx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_TX_CTL); + rx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL); + + debug("BGX%d LMAC%d BGX_SPUX_STATUS2: %lx\n", bgx_idx, lmacid, + (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_SPUX_STATUS2)); + debug("BGX%d LMAC%d BGX_SPUX_STATUS1: %lx\n", bgx_idx, lmacid, + (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_SPUX_STATUS1)); + debug("BGX%d LMAC%d BGX_SMUX_RX_CTL: %lx\n", bgx_idx, lmacid, + (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_SMUX_RX_CTL)); + debug("BGX%d LMAC%d BGX_SMUX_TX_CTL: %lx\n", bgx_idx, lmacid, + (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_SMUX_TX_CTL)); + + if ((status1 & SPU_STATUS1_RCV_LNK) && + ((tx_ctl & SMU_TX_CTL_LNK_STATUS) == 0) && + ((rx_ctl & SMU_RX_CTL_STATUS) == 0)) { + lmac->link_up = 1; + if (lmac->lmac_type == 4) + lmac->last_speed = 40000; + else + lmac->last_speed = 10000; + lmac->last_duplex = 1; + } else { + lmac->link_up = 0; + lmac->last_speed = 0; + lmac->last_duplex = 0; + return bgx_xaui_check_link(lmac); + } + + lmac->last_link = lmac->link_up; + } + + printf("BGX%d:LMAC %u link %s\n", bgx_idx, lmacid, + (lmac->link_up) ? "up" : "down"); + + return lmac->link_up; +} + +void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid) +{ + struct lmac *lmac; + u64 cmrx_cfg; + + lmac = &bgx->lmac[lmacid]; + + cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cmrx_cfg &= ~(1 << 15); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); + bgx_flush_dmac_addrs(bgx, lmacid); + + if (lmac->phydev) + phy_shutdown(lmac->phydev); + + lmac->phydev = NULL; +} + +/* Program BGXX_CMRX_CONFIG.{lmac_type,lane_to_sds} for each interface. 
+ * Also program the number of LMACs used by this interface. Each lmac
+ * can be programmed in a different mode, so parse each lmac one at a
+ * time.
+ */
+static void bgx_init_hw(struct bgx *bgx)
+{
+	struct lmac *lmac;
+	int i, lmacid, count = 0, inc = 0;
+	char buf[40];
+	static int qsgmii_configured;
+
+	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
+		struct lmac *tlmac;
+
+		lmac = &bgx->lmac[lmacid];
+		debug("%s: lmacid = %d, qlm = %d, mode = %d\n",
+		      __func__, lmacid, lmac->qlm, lmac->qlm_mode);
+		/* If QLM is not programmed, skip */
+		if (lmac->qlm == -1)
+			continue;
+
+		switch (lmac->qlm_mode) {
+		case QLM_MODE_SGMII:
+		{
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 0;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: %s\n",
+				 bgx->bgx_id, lmac->qlm, lmacid,
+				 lmac->is_1gx ? "1000Base-X" : "SGMII");
+			break;
+		}
+		case QLM_MODE_XAUI:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 1;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_RXAUI:
+			if (lmacid == 0) {
+				lmac->lmac_type = 2;
+				lmac->lane_to_sds = 0x4;
+			} else if (lmacid == 1) {
+				struct lmac *tlmac;
+
+				tlmac = &bgx->lmac[2];
+				if (tlmac->qlm_mode == QLM_MODE_RXAUI) {
+					lmac->lmac_type = 2;
+					lmac->lane_to_sds = 0xe;
+					lmac->qlm = tlmac->qlm;
+				}
+			} else {
+				continue;
+			}
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: RXAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_XFI:
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 3;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XFI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_XLAUI:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 4;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XLAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_10G_KR:
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 3;
+			lmac->use_training = 1;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: 10G-KR\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_40G_KR4:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 4;
+			lmac->lane_to_sds = 0xE4;
+			lmac->use_training = 1;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: 40G-KR4\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_RGMII:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 5;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d LMAC%d mode: RGMII\n",
+				 bgx->bgx_id, lmacid);
+			break;
+		case QLM_MODE_QSGMII:
+			if (qsgmii_configured)
+				continue;
+			if (lmacid == 0 || lmacid == 2) {
+				count = 4;
+				printf("BGX%d QLM%d LMAC%d mode: QSGMII\n",
+				       bgx->bgx_id, lmac->qlm, lmacid);
+				for (i = 0; i < count; i++) {
+					struct lmac *l;
+					int type;
+
+					l = &bgx->lmac[i];
+					l->lmac_type = 6;
+					type = l->lmac_type;
+					l->qlm_mode = QLM_MODE_QSGMII;
+					l->lane_to_sds = lmacid + i;
+					if (is_bgx_port_valid(bgx->bgx_id, i))
+						
bgx_reg_write(bgx, i, + BGX_CMRX_CFG, + (type << 8) | + l->lane_to_sds); + } + qsgmii_configured = 1; + } + continue; + default: + continue; + } + + /* Reset lmac to the unused slot */ + if (is_bgx_port_valid(bgx->bgx_id, count) && + lmac->qlm_mode != QLM_MODE_QSGMII) { + int lmac_en = 0; + int tmp, idx; + + tlmac = &bgx->lmac[count]; + tlmac->lmac_type = lmac->lmac_type; + idx = bgx->bgx_id; + tmp = count + inc; + /* Adjust lane_to_sds based on BGX-ENABLE */ + for (; tmp < MAX_LMAC_PER_BGX; inc++) { + lmac_en = bgx_board_info[idx].lmac_enable[tmp]; + if (lmac_en) + break; + tmp = count + inc; + } + + if (inc != 0 && inc < MAX_LMAC_PER_BGX && + lmac_en && inc != count) + tlmac->lane_to_sds = + lmac->lane_to_sds + abs(inc - count); + else + tlmac->lane_to_sds = lmac->lane_to_sds; + tlmac->qlm = lmac->qlm; + tlmac->qlm_mode = lmac->qlm_mode; + + printf("%s", buf); + /* Initialize lmac_type and lane_to_sds */ + bgx_reg_write(bgx, count, BGX_CMRX_CFG, + (tlmac->lmac_type << 8) | + tlmac->lane_to_sds); + + if (tlmac->lmac_type == BGX_MODE_SGMII) { + if (tlmac->is_1gx) { + /* This is actually 1000BASE-X, so + * mark the LMAC as such. + */ + bgx_reg_modify(bgx, count, + BGX_GMP_PCS_MISCX_CTL, + PCS_MISC_CTL_MODE); + } + + if (!bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) { + /* The Linux DTS does not disable + * autoneg for this LMAC (in SGMII or + * 1000BASE-X mode), so that means + * enable autoneg. + */ + bgx_reg_modify(bgx, count, + BGX_GMP_PCS_MRX_CTL, + PCS_MRX_CTL_AN_EN); + } + } + + count += 1; + } + } + + /* Done probing all 4 lmacs, now clear qsgmii_configured */ + qsgmii_configured = 0; + + printf("BGX%d LMACs: %d\n", bgx->bgx_id, count); + bgx->lmac_count = count; + bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, count); + bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, count); + + bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); + if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) + printf("BGX%d BIST failed\n", bgx->bgx_id); + + /* Set the backpressure AND mask */ + for (i = 0; i < bgx->lmac_count; i++) + bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, + ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << + (i * MAX_BGX_CHANS_PER_LMAC)); + + /* Disable all MAC filtering */ + for (i = 0; i < RX_DMAC_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); + + /* Disable MAC steering (NCSI traffic) */ + for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); +} + +static void bgx_get_qlm_mode(struct bgx *bgx) +{ + struct lmac *lmac; + int lmacid; + + /* Read LMACx type to figure out QLM mode + * This is configured by low level firmware + */ + for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) { + int lmac_type; + int train_en; + int index = 0; + + if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) && + bgx->bgx_id == 2)) + index = (lmacid < 2) ? 0 : 2; + + lmac = &bgx->lmac[lmacid]; + + /* check if QLM is programmed, if not, skip */ + if (lmac->qlm == -1) + continue; + + lmac_type = bgx_reg_read(bgx, index, BGX_CMRX_CFG); + lmac->lmac_type = (lmac_type >> 8) & 0x07; + debug("%s:%d:%d: lmac_type = %d, altpkg = %d\n", __func__, + bgx->bgx_id, lmacid, lmac->lmac_type, otx_is_altpkg()); + + train_en = (readq(GSERX_SCRATCH(lmac->qlm))) & 0xf; + lmac->is_1gx = bgx_reg_read(bgx, index, BGX_GMP_PCS_MISCX_CTL) + & (PCS_MISC_CTL_MODE) ? 
true : false; + + switch (lmac->lmac_type) { + case BGX_MODE_SGMII: + if (bgx->is_rgx) { + if (lmacid == 0) { + lmac->qlm_mode = QLM_MODE_RGMII; + debug("BGX%d LMAC%d mode: RGMII\n", + bgx->bgx_id, lmacid); + } + continue; + } else { + if (bgx->bgx_id == 0 && otx_is_altpkg()) { + if (lmacid % 2) + continue; + } + lmac->qlm_mode = QLM_MODE_SGMII; + debug("BGX%d QLM%d LMAC%d mode: %s\n", + bgx->bgx_id, lmac->qlm, lmacid, + lmac->is_1gx ? "1000Base-X" : "SGMII"); + } + break; + case BGX_MODE_XAUI: + if (bgx->bgx_id == 0 && otx_is_altpkg()) + continue; + lmac->qlm_mode = QLM_MODE_XAUI; + if (lmacid != 0) + continue; + debug("BGX%d QLM%d LMAC%d mode: XAUI\n", + bgx->bgx_id, lmac->qlm, lmacid); + break; + case BGX_MODE_RXAUI: + if (bgx->bgx_id == 0 && otx_is_altpkg()) + continue; + lmac->qlm_mode = QLM_MODE_RXAUI; + if (index == lmacid) { + debug("BGX%d QLM%d LMAC%d mode: RXAUI\n", + bgx->bgx_id, lmac->qlm, (index ? 1 : 0)); + } + break; + case BGX_MODE_XFI: + if (bgx->bgx_id == 0 && otx_is_altpkg()) { + if (lmacid % 2) + continue; + } + if ((lmacid < 2 && (train_en & (1 << lmacid))) || + (train_en & (1 << (lmacid - 2)))) { + lmac->qlm_mode = QLM_MODE_10G_KR; + debug("BGX%d QLM%d LMAC%d mode: 10G_KR\n", + bgx->bgx_id, lmac->qlm, lmacid); + } else { + lmac->qlm_mode = QLM_MODE_XFI; + debug("BGX%d QLM%d LMAC%d mode: XFI\n", + bgx->bgx_id, lmac->qlm, lmacid); + } + break; + case BGX_MODE_XLAUI: + if (bgx->bgx_id == 0 && otx_is_altpkg()) + continue; + if (train_en) { + lmac->qlm_mode = QLM_MODE_40G_KR4; + if (lmacid != 0) + break; + debug("BGX%d QLM%d LMAC%d mode: 40G_KR4\n", + bgx->bgx_id, lmac->qlm, lmacid); + } else { + lmac->qlm_mode = QLM_MODE_XLAUI; + if (lmacid != 0) + break; + debug("BGX%d QLM%d LMAC%d mode: XLAUI\n", + bgx->bgx_id, lmac->qlm, lmacid); + } + break; + case BGX_MODE_QSGMII: + /* If QLM is configured as QSGMII, use lmac0 */ + if (otx_is_soc(CN83XX) && lmacid == 2 && + bgx->bgx_id != 2) { + //lmac->qlm_mode = QLM_MODE_DISABLED; + continue; + } + + if (lmacid == 0 || lmacid == 2) { + lmac->qlm_mode = QLM_MODE_QSGMII; + debug("BGX%d QLM%d LMAC%d mode: QSGMII\n", + bgx->bgx_id, lmac->qlm, lmacid); + } + break; + default: + break; + } + } +} + +void bgx_set_board_info(int bgx_id, int *mdio_bus, + int *phy_addr, bool *autoneg_dis, bool *lmac_reg, + bool *lmac_enable) +{ + unsigned int i; + + for (i = 0; i < MAX_LMAC_PER_BGX; i++) { + bgx_board_info[bgx_id].phy_info[i].phy_addr = phy_addr[i]; + bgx_board_info[bgx_id].phy_info[i].mdio_bus = mdio_bus[i]; + bgx_board_info[bgx_id].phy_info[i].autoneg_dis = autoneg_dis[i]; + bgx_board_info[bgx_id].lmac_reg[i] = lmac_reg[i]; + bgx_board_info[bgx_id].lmac_enable[i] = lmac_enable[i]; + debug("%s bgx_id %d lmac %d\n", __func__, bgx_id, i); + debug("phy addr %x mdio bus %d autoneg_dis %d lmac_reg %d\n", + bgx_board_info[bgx_id].phy_info[i].phy_addr, + bgx_board_info[bgx_id].phy_info[i].mdio_bus, + bgx_board_info[bgx_id].phy_info[i].autoneg_dis, + bgx_board_info[bgx_id].lmac_reg[i]); + debug("lmac_enable = %x\n", + bgx_board_info[bgx_id].lmac_enable[i]); + } +} + +int octeontx_bgx_remove(struct udevice *dev) +{ + int lmacid; + u64 cfg; + int count = MAX_LMAC_PER_BGX; + struct bgx *bgx = dev_get_priv(dev); + + if (!bgx->reg_base) + return 0; + + if (bgx->is_rgx) + count = 1; + + for (lmacid = 0; lmacid < count; lmacid++) { + struct lmac *lmac; + + lmac = &bgx->lmac[lmacid]; + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + /* Disable PCS for 1G 
interface */ + if (lmac->lmac_type == BGX_MODE_SGMII || + lmac->lmac_type == BGX_MODE_QSGMII) { + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); + cfg |= PCS_MRX_CTL_PWR_DN; + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); + } + + debug("%s disabling bgx%d lmacid%d\n", __func__, bgx->bgx_id, + lmacid); + bgx_lmac_disable(bgx, lmacid); + } + return 0; +} + +int octeontx_bgx_probe(struct udevice *dev) +{ + struct bgx *bgx = dev_get_priv(dev); + u8 lmac = 0; + int qlm[4] = {-1, -1, -1, -1}; + int bgx_idx, node; + int inc = 1; + + bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, + PCI_REGION_MEM); + if (!bgx->reg_base) { + debug("No PCI region found\n"); + return 0; + } + +#ifdef OCTEONTX_XCV + /* Use FAKE BGX2 for RGX interface */ + if ((((uintptr_t)bgx->reg_base >> 24) & 0xf) == 0x8) { + bgx->bgx_id = 2; + bgx->is_rgx = true; + for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) { + if (lmac == 0) { + bgx->lmac[lmac].lmacid = 0; + bgx->lmac[lmac].qlm = 0; + } else { + bgx->lmac[lmac].qlm = -1; + } + } + xcv_init_hw(); + goto skip_qlm_config; + } +#endif + + node = node_id(bgx->reg_base); + bgx_idx = ((uintptr_t)bgx->reg_base >> 24) & 3; + bgx->bgx_id = (node * MAX_BGX_PER_NODE) + bgx_idx; + if (otx_is_soc(CN81XX)) + inc = 2; + else if (otx_is_soc(CN83XX) && (bgx_idx == 2)) + inc = 2; + + for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac += inc) { + /* BGX3 (DLM4), has only 2 lanes */ + if (otx_is_soc(CN83XX) && bgx_idx == 3 && lmac >= 2) + continue; + qlm[lmac + 0] = get_qlm_for_bgx(node, bgx_idx, lmac); + /* Each DLM has 2 lanes, configure both lanes with + * same qlm configuration + */ + if (inc == 2) + qlm[lmac + 1] = qlm[lmac]; + debug("qlm[%d] = %d\n", lmac, qlm[lmac]); + } + + /* A BGX can take 1 or 2 DLMs, if both the DLMs are not configured + * as BGX, then return, nothing to initialize + */ + if (otx_is_soc(CN81XX)) + if ((qlm[0] == -1) && (qlm[2] == -1)) + return -ENODEV; + + /* MAP configuration registers */ + for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) { + bgx->lmac[lmac].qlm = qlm[lmac]; + bgx->lmac[lmac].lmacid = lmac; + } + +#ifdef OCTEONTX_XCV +skip_qlm_config: +#endif + bgx_vnic[bgx->bgx_id] = bgx; + bgx_get_qlm_mode(bgx); + debug("bgx_vnic[%u]: %p\n", bgx->bgx_id, bgx); + + bgx_init_hw(bgx); + + /* Init LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + struct lmac *tlmac = &bgx->lmac[lmac]; + + tlmac->dev = dev; + tlmac->init_pend = 1; + tlmac->bgx = bgx; + } + + return 0; +} + +U_BOOT_DRIVER(octeontx_bgx) = { + .name = "octeontx_bgx", + .id = UCLASS_MISC, + .probe = octeontx_bgx_probe, + .remove = octeontx_bgx_remove, + .priv_auto_alloc_size = sizeof(struct bgx), + .flags = DM_FLAG_OS_PREPARE, +}; + +static struct pci_device_id octeontx_bgx_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BGX) }, + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RGX) }, + {} +}; + +U_BOOT_PCI_DEVICE(octeontx_bgx, octeontx_bgx_supported); diff --git a/drivers/net/octeontx/bgx.h b/drivers/net/octeontx/bgx.h new file mode 100644 index 00000000000..8402630bf5a --- /dev/null +++ b/drivers/net/octeontx/bgx.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. 
+ */ + +#ifndef BGX_H +#define BGX_H + +#include <asm/arch/board.h> + +/* PCI device IDs */ +#define PCI_DEVICE_ID_OCTEONTX_BGX 0xA026 +#define PCI_DEVICE_ID_OCTEONTX_RGX 0xA054 + +#define MAX_LMAC_PER_BGX 4 +#define MAX_BGX_CHANS_PER_LMAC 16 +#define MAX_DMAC_PER_LMAC 8 +#define MAX_FRAME_SIZE 9216 + +#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 + +#define MAX_LMAC (MAX_BGX_PER_NODE * MAX_LMAC_PER_BGX) + +#define NODE_ID_MASK 0x300000000000 +#define NODE_ID(x) (((x) & NODE_ID_MASK) >> 44) + +/* Registers */ +#define GSERX_CFG(x) (0x87E090000080ull + (x) * 0x1000000ull) +#define GSERX_SCRATCH(x) (0x87E090000020ull + (x) * 0x1000000ull) +#define GSERX_PHY_CTL(x) (0x87E090000000ull + (x) * 0x1000000ull) +#define GSERX_CFG_BGX BIT(2) +#define GSER_RX_EIE_DETSTS(x) (0x87E090000150ull + (x) * 0x1000000ull) +#define GSER_CDRLOCK (8) +#define GSER_BR_RXX_CTL(x, y) (0x87E090000400ull + (x) * 0x1000000ull + \ + (y) * 0x80) +#define GSER_BR_RXX_CTL_RXT_SWM BIT(2) +#define GSER_BR_RXX_EER(x, y) (0x87E090000418ull + (x) * 0x1000000ull + \ + (y) * 0x80) +#define GSER_BR_RXX_EER_RXT_ESV BIT(14) +#define GSER_BR_RXX_EER_RXT_EER BIT(15) +#define EER_RXT_ESV (14) + +#define BGX_CMRX_CFG 0x00 +#define CMR_PKT_TX_EN BIT_ULL(13) +#define CMR_PKT_RX_EN BIT_ULL(14) +#define CMR_EN BIT_ULL(15) +#define BGX_CMR_GLOBAL_CFG 0x08 +#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6) +#define BGX_CMRX_RX_ID_MAP 0x60 +#define BGX_CMRX_RX_STAT0 0x70 +#define BGX_CMRX_RX_STAT1 0x78 +#define BGX_CMRX_RX_STAT2 0x80 +#define BGX_CMRX_RX_STAT3 0x88 +#define BGX_CMRX_RX_STAT4 0x90 +#define BGX_CMRX_RX_STAT5 0x98 +#define BGX_CMRX_RX_STAT6 0xA0 +#define BGX_CMRX_RX_STAT7 0xA8 +#define BGX_CMRX_RX_STAT8 0xB0 +#define BGX_CMRX_RX_STAT9 0xB8 +#define BGX_CMRX_RX_STAT10 0xC0 +#define BGX_CMRX_RX_BP_DROP 0xC8 +#define BGX_CMRX_RX_DMAC_CTL 0x0E8 +#define BGX_CMR_RX_DMACX_CAM 0x200 +#define RX_DMACX_CAM_EN BIT_ULL(48) +#define RX_DMACX_CAM_LMACID(x) ((x) << 49) +#define RX_DMAC_COUNT 32 +#define BGX_CMR_RX_STREERING 0x300 +#define RX_TRAFFIC_STEER_RULE_COUNT 8 +#define BGX_CMR_CHAN_MSK_AND 0x450 +#define BGX_CMR_BIST_STATUS 0x460 +#define BGX_CMR_RX_LMACS 0x468 +#define BGX_CMRX_TX_STAT0 0x600 +#define BGX_CMRX_TX_STAT1 0x608 +#define BGX_CMRX_TX_STAT2 0x610 +#define BGX_CMRX_TX_STAT3 0x618 +#define BGX_CMRX_TX_STAT4 0x620 +#define BGX_CMRX_TX_STAT5 0x628 +#define BGX_CMRX_TX_STAT6 0x630 +#define BGX_CMRX_TX_STAT7 0x638 +#define BGX_CMRX_TX_STAT8 0x640 +#define BGX_CMRX_TX_STAT9 0x648 +#define BGX_CMRX_TX_STAT10 0x650 +#define BGX_CMRX_TX_STAT11 0x658 +#define BGX_CMRX_TX_STAT12 0x660 +#define BGX_CMRX_TX_STAT13 0x668 +#define BGX_CMRX_TX_STAT14 0x670 +#define BGX_CMRX_TX_STAT15 0x678 +#define BGX_CMRX_TX_STAT16 0x680 +#define BGX_CMRX_TX_STAT17 0x688 +#define BGX_CMR_TX_LMACS 0x1000 + +#define BGX_SPUX_CONTROL1 0x10000 +#define SPU_CTL_LOW_POWER BIT_ULL(11) +#define SPU_CTL_LOOPBACK BIT_ULL(14) +#define SPU_CTL_RESET BIT_ULL(15) +#define BGX_SPUX_STATUS1 0x10008 +#define SPU_STATUS1_RCV_LNK BIT_ULL(2) +#define BGX_SPUX_STATUS2 0x10020 +#define SPU_STATUS2_RCVFLT BIT_ULL(10) +#define BGX_SPUX_BX_STATUS 0x10028 +#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12) +#define BGX_SPUX_BR_STATUS1 0x10030 +#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0) +#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12) +#define BGX_SPUX_BR_PMD_CRTL 0x10068 +#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1) +#define BGX_SPUX_BR_PMD_LP_CUP 0x10078 +#define BGX_SPUX_BR_PMD_LD_CUP 0x10088 +#define BGX_SPUX_BR_PMD_LD_REP 0x10090 +#define BGX_SPUX_FEC_CONTROL 0x100A0 +#define SPU_FEC_CTL_FEC_EN 
BIT_ULL(0) +#define SPU_FEC_CTL_ERR_EN BIT_ULL(1) +#define BGX_SPUX_AN_CONTROL 0x100C8 +#define SPU_AN_CTL_AN_EN BIT_ULL(12) +#define SPU_AN_CTL_XNP_EN BIT_ULL(13) +#define SPU_AN_CTL_AN_RESTART BIT_ULL(15) +#define BGX_SPUX_AN_STATUS 0x100D0 +#define SPU_AN_STS_AN_COMPLETE BIT_ULL(5) +#define BGX_SPUX_AN_ADV 0x100D8 +#define BGX_SPUX_MISC_CONTROL 0x10218 +#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10) +#define SPU_MISC_CTL_RX_DIS BIT_ULL(12) +#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */ +#define BGX_SPUX_INT_W1S 0x10228 +#define BGX_SPUX_INT_ENA_W1C 0x10230 +#define BGX_SPUX_INT_ENA_W1S 0x10238 +#define BGX_SPU_DBG_CONTROL 0x10300 +#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18) +#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29) + +#define BGX_SMUX_RX_INT 0x20000 +#define BGX_SMUX_RX_JABBER 0x20030 +#define BGX_SMUX_RX_CTL 0x20048 +#define SMU_RX_CTL_STATUS (3ull << 0) +#define BGX_SMUX_TX_APPEND 0x20100 +#define SMU_TX_APPEND_FCS_D BIT_ULL(2) +#define BGX_SMUX_TX_MIN_PKT 0x20118 +#define BGX_SMUX_TX_INT 0x20140 +#define BGX_SMUX_TX_CTL 0x20178 +#define SMU_TX_CTL_DIC_EN BIT_ULL(0) +#define SMU_TX_CTL_UNI_EN BIT_ULL(1) +#define SMU_TX_CTL_LNK_STATUS (3ull << 4) +#define BGX_SMUX_TX_THRESH 0x20180 +#define BGX_SMUX_CTL 0x20200 +#define SMU_CTL_RX_IDLE BIT_ULL(0) +#define SMU_CTL_TX_IDLE BIT_ULL(1) + +#define BGX_GMP_PCS_MRX_CTL 0x30000 +#define PCS_MRX_CTL_RST_AN BIT_ULL(9) +#define PCS_MRX_CTL_PWR_DN BIT_ULL(11) +#define PCS_MRX_CTL_AN_EN BIT_ULL(12) +#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) +#define PCS_MRX_CTL_RESET BIT_ULL(15) +#define BGX_GMP_PCS_MRX_STATUS 0x30008 +#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) +#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 +#define BGX_GMP_PCS_SGM_AN_ADV 0x30068 +#define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISCX_CTL_DISP_EN BIT_ULL(13) +#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) +#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full +#define PCS_MISC_CTL_MODE BIT_ULL(8) +#define BGX_GMP_GMI_PRTX_CFG 0x38020 +#define GMI_PORT_CFG_SPEED BIT_ULL(1) +#define GMI_PORT_CFG_DUPLEX BIT_ULL(2) +#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) +#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) +#define BGX_GMP_GMI_RXX_JABBER 0x38038 +#define BGX_GMP_GMI_TXX_THRESH 0x38210 +#define BGX_GMP_GMI_TXX_APPEND 0x38218 +#define BGX_GMP_GMI_TXX_SLOT 0x38220 +#define BGX_GMP_GMI_TXX_BURST 0x38228 +#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 +#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 + +#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ +#define BGX_MSIX_VEC_0_29_CTL 0x400008 +#define BGX_MSIX_PBA_0 0x4F0000 + +/* MSI-X interrupts */ +#define BGX_MSIX_VECTORS 30 +#define BGX_LMAC_VEC_OFFSET 7 +#define BGX_MSIX_VEC_SHIFT 4 + +#define CMRX_INT 0 +#define SPUX_INT 1 +#define SMUX_RX_INT 2 +#define SMUX_TX_INT 3 +#define GMPX_PCS_INT 4 +#define GMPX_GMI_RX_INT 5 +#define GMPX_GMI_TX_INT 6 +#define CMR_MEM_INT 28 +#define SPU_MEM_INT 29 + +#define LMAC_INTR_LINK_UP BIT(0) +#define LMAC_INTR_LINK_DOWN BIT(1) + +/* RX_DMAC_CTL configuration*/ +enum MCAST_MODE { + MCAST_MODE_REJECT, + MCAST_MODE_ACCEPT, + MCAST_MODE_CAM_FILTER, + RSVD +}; + +#define BCAST_ACCEPT 1 +#define CAM_ACCEPT 1 + +int octeontx_bgx_initialize(unsigned int bgx_idx, unsigned int node); +void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); +void bgx_get_count(int node, int *bgx_count); +int bgx_get_lmac_count(int node, int bgx); +void bgx_print_stats(int bgx_idx, int lmac); +void xcv_init_hw(void); +void xcv_setup_link(bool link_up, int link_speed); + +#undef LINK_INTR_ENABLE + +enum qlm_mode { + QLM_MODE_SGMII, 
/* SGMII, each lane independent */
+	QLM_MODE_XAUI,		/* 1 XAUI or DXAUI, 4 lanes */
+	QLM_MODE_RXAUI,		/* 2 RXAUI, 2 lanes each */
+	QLM_MODE_XFI,		/* 4 XFI, 1 lane each */
+	QLM_MODE_XLAUI,		/* 1 XLAUI, 4 lanes each */
+	QLM_MODE_10G_KR,	/* 4 10GBASE-KR, 1 lane each */
+	QLM_MODE_40G_KR4,	/* 1 40GBASE-KR4, 4 lanes each */
+	QLM_MODE_QSGMII,	/* 4 QSGMII, each lane independent */
+	QLM_MODE_RGMII,		/* 1 RGX */
+};
+
+struct phy_info {
+	int mdio_bus;
+	int phy_addr;
+	bool autoneg_dis;
+};
+
+struct bgx_board_info {
+	struct phy_info phy_info[MAX_LMAC_PER_BGX];
+	bool lmac_reg[MAX_LMAC_PER_BGX];
+	bool lmac_enable[MAX_LMAC_PER_BGX];
+};
+
+enum LMAC_TYPE {
+	BGX_MODE_SGMII = 0,	/* 1 lane, 1.250 Gbaud */
+	BGX_MODE_XAUI = 1,	/* 4 lanes, 3.125 Gbaud */
+	BGX_MODE_DXAUI = 1,	/* 4 lanes, 6.250 Gbaud */
+	BGX_MODE_RXAUI = 2,	/* 2 lanes, 6.250 Gbaud */
+	BGX_MODE_XFI = 3,	/* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_XLAUI = 4,	/* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_10G_KR = 3,	/* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_40G_KR = 4,	/* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_RGMII = 5,
+	BGX_MODE_QSGMII = 6,
+	BGX_MODE_INVALID = 7,
+};
+
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr);
+
+#endif /* BGX_H */
diff --git a/drivers/net/octeontx/nic.h b/drivers/net/octeontx/nic.h
new file mode 100644
index 00000000000..af3576cfbba
--- /dev/null
+++ b/drivers/net/octeontx/nic.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include "bgx.h"
+
+#define PCI_DEVICE_ID_CAVIUM_NICVF_1	0x0011
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF	0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF	0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF	0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF	0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF	0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF	0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF	0xA334
+
+#define NIC_INTF_COUNT		2  /* Interfaces btw VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF	128
+#define NIC_MAX_CHANS		(NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM	0
+#define PCI_MSIX_REG_BAR_NUM	4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED	128
+#define DEFAULT_NUM_VF_ENABLED	8
+
+#define NIC_TNS_BYPASS_MODE	0
+#define NIC_TNS_MODE		1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED	BIT(0)
+#define NIC_TNS_ENABLED		BIT(1)
+
+/* VNIC HW optimization features */
+#define VNIC_RX_CSUM_OFFLOAD_SUPPORT
+#undef VNIC_TX_CSUM_OFFLOAD_SUPPORT
+#undef VNIC_SG_SUPPORT
+#undef VNIC_TSO_SUPPORT
+#undef VNIC_LRO_SUPPORT
+#undef VNIC_RSS_SUPPORT
+
+/* TSO not supported in Thunder pass1 */
+#ifdef VNIC_TSO_SUPPORT
+#define VNIC_SW_TSO_SUPPORT
+#undef VNIC_HW_TSO_SUPPORT
+#endif
+
+/* ETHTOOL enable or disable, undef this to disable */
+#define NICVF_ETHTOOL_ENABLE
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS		64
+#define NIC_HW_MAX_FRS		9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND		16
+
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC	64
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ		0
+#define NICVF_INTR_SQ		1
+#define NICVF_INTR_RBDR		2
+#define NICVF_INTR_PKT_DROP	3
+#define NICVF_INTR_TCP_TIMER	4
+#define NICVF_INTR_MBOX		5
+#define NICVF_INTR_QS_ERR	6
+
+#define NICVF_INTR_CQ_SHIFT		0
+#define NICVF_INTR_SQ_SHIFT		8
+#define NICVF_INTR_RBDR_SHIFT		16
+#define NICVF_INTR_PKT_DROP_SHIFT	20
+#define NICVF_INTR_TCP_TIMER_SHIFT	21
+#define NICVF_INTR_MBOX_SHIFT		22
+#define NICVF_INTR_QS_ERR_SHIFT		23
+
+#define NICVF_INTR_CQ_MASK		(0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK		(0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK		(0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK	BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK	BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK		BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK		BIT(NICVF_INTR_QS_ERR_SHIFT)
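
These shift/mask pairs pack every VF interrupt source into a single 64-bit register image: one bit per CQ (bits 0-7) and per SQ (bits 8-15), two RBDR bits, and single bits for the remaining sources. A hypothetical helper, not part of this patch, makes the encoding concrete:

static inline u64 nicvf_int_bit(int int_type, int q_idx)
{
	/* Sketch only: map an interrupt source and queue index to its
	 * bit position, using the NICVF_INTR_* constants above.
	 */
	switch (int_type) {
	case NICVF_INTR_CQ:
		return 1ULL << (NICVF_INTR_CQ_SHIFT + q_idx);	/* bits 0-7 */
	case NICVF_INTR_SQ:
		return 1ULL << (NICVF_INTR_SQ_SHIFT + q_idx);	/* bits 8-15 */
	case NICVF_INTR_RBDR:
		return 1ULL << (NICVF_INTR_RBDR_SHIFT + q_idx);	/* bits 16-17 */
	case NICVF_INTR_MBOX:
		return 1ULL << NICVF_INTR_MBOX_SHIFT;		/* bit 22 */
	default:
		return 0;
	}
}
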
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS	10
+#define NIC_VF_MSIX_VECTORS	20
+
+#define NIC_PF_INTR_ID_ECC0_SBE	0
+#define NIC_PF_INTR_ID_ECC0_DBE	1
+#define NIC_PF_INTR_ID_ECC1_SBE	2
+#define NIC_PF_INTR_ID_ECC1_DBE	3
+#define NIC_PF_INTR_ID_ECC2_SBE	4
+#define NIC_PF_INTR_ID_ECC2_DBE	5
+#define NIC_PF_INTR_ID_ECC3_SBE	6
+#define NIC_PF_INTR_ID_ECC3_DBE	7
+#define NIC_PF_INTR_ID_MBOX0	8
+#define NIC_PF_INTR_ID_MBOX1	9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for SCLK of 700MHz;
+ * the value written should be 1/16th of what is expected
+ *
+ * 1 tick per ms
+ */
+#define NICPF_CLK_PER_INT_TICK	43750
+
+struct nicvf_cq_poll {
+	u8	cq_idx;		/* Completion queue index */
+};
+
+#define NIC_MAX_RSS_HASH_BITS		8
+#define NIC_MAX_RSS_IDR_TBL_SIZE	BIT(NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE		5 /* 320 bit key */
+
+#ifdef VNIC_RSS_SUPPORT
+struct nicvf_rss_info {
+	bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA	BIT(0)
+#define RSS_IP_HASH_ENA			BIT(1)
+#define RSS_TCP_HASH_ENA		BIT(2)
+#define RSS_TCP_SYN_DIS			BIT(3)
+#define RSS_UDP_HASH_ENA		BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA	BIT(5)
+#define RSS_ROCE_ENA			BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA		BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA		BIT(8)
+	u64 cfg;
+	u8  hash_bits;
+	u16 rss_size;
+	u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+	u64 key[RSS_HASH_KEY_SIZE];
+};
+#endif
+
+enum rx_stats_reg_offset {
+	RX_OCTS = 0x0,
+	RX_UCAST = 0x1,
+	RX_BCAST = 0x2,
+	RX_MCAST = 0x3,
+	RX_RED = 0x4,
+	RX_RED_OCTS = 0x5,
+	RX_ORUN = 0x6,
+	RX_ORUN_OCTS = 0x7,
+	RX_FCS = 0x8,
+	RX_L2ERR = 0x9,
+	RX_DRP_BCAST = 0xa,
+	RX_DRP_MCAST = 0xb,
+	RX_DRP_L3BCAST = 0xc,
+	RX_DRP_L3MCAST = 0xd,
+	RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+	TX_OCTS = 0x0,
+	TX_UCAST = 0x1,
+	TX_BCAST = 0x2,
+	TX_MCAST = 0x3,
+	TX_DROP = 0x4,
+	TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+	u64 rx_bytes_ok;
+	u64 rx_ucast_frames_ok;
+	u64 rx_bcast_frames_ok;
+	u64 rx_mcast_frames_ok;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+	u64 rx_drop_red;
+	u64 rx_drop_red_bytes;
+	u64 rx_drop_overrun;
+	u64 rx_drop_overrun_bytes;
+	u64 rx_drop_bcast;
+	u64 rx_drop_mcast;
+	u64 rx_drop_l3_bcast;
+	u64 rx_drop_l3_mcast;
+	u64 tx_bytes_ok;
+	u64 tx_ucast_frames_ok;
+	u64 tx_bcast_frames_ok;
+	u64 tx_mcast_frames_ok;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* Rx */
+	u64 rx_frames_ok;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_jumbo;
+	u64 rx_drops;
+	/* Tx */
+	u64 tx_frames_ok;
+	u64 tx_drops;
+	u64 tx_busy;
+	u64 tx_tso;
+};
+
+struct hw_info {
+	u8 bgx_cnt;
+	u8 chans_per_lmac;
+	u8 chans_per_bgx;	/* Rx/Tx chans */
+	u8 chans_per_rgx;
+	u8 chans_per_lbk;
+	u16 cpi_cnt;
+	u16 rssi_cnt;
+	u16 rss_ind_tbl_size;
+	u16 tl4_cnt;
+	u16 tl3_cnt;
+	u8 tl2_cnt;
+	u8 tl1_cnt;
+	bool tl1_per_bgx;	/* TL1 per BGX or per LMAC */
+	u8 model_id;
+};
+
+struct nicvf {
+	struct udevice		*dev;
+	u8			vf_id;
+	bool			sqs_mode:1;
+	bool			loopback_supported:1;
+	u8			tns_mode;
+	u8			node;
+	u16
mtu; + struct queue_set *qs; +#define MAX_SQS_PER_VF_SINGLE_NODE 5 +#define MAX_SQS_PER_VF 11 + u8 num_qs; + void *addnl_qs; + u16 vf_mtu; + void __iomem *reg_base; +#define MAX_QUEUES_PER_QSET 8 + struct nicvf_cq_poll *napi[8]; + + u8 cpi_alg; + + struct nicvf_hw_stats stats; + struct nicvf_drv_stats drv_stats; + + struct nicpf *nicpf; + + /* VF <-> PF mailbox communication */ + bool pf_acked; + bool pf_nacked; + bool set_mac_pending; + + bool link_up; + u8 duplex; + u32 speed; + u8 rev_id; + u8 rx_queues; + u8 tx_queues; + + bool open; + bool rb_alloc_fail; + void *rcv_buf; + bool hw_tso; +}; + +static inline int node_id(void *addr) +{ + return ((uintptr_t)addr >> 44) & 0x3; +} + +struct nicpf { + struct udevice *udev; + struct hw_info *hw; + u8 node; + unsigned int flags; + u16 total_vf_cnt; /* Total num of VF supported */ + u16 num_vf_en; /* No of VF enabled */ + void __iomem *reg_base; /* Register start address */ + u16 rss_ind_tbl_size; + u8 num_sqs_en; /* Secondary qsets enabled */ + u64 nicvf[MAX_NUM_VFS_SUPPORTED]; + u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF]; + u8 pqs_vf[MAX_NUM_VFS_SUPPORTED]; + bool sqs_used[MAX_NUM_VFS_SUPPORTED]; + struct pkind_cfg pkind; + u8 bgx_cnt; + u8 rev_id; +#define NIC_SET_VF_LMAC_MAP(bgx, lmac) ((((bgx) & 0xF) << 4) | ((lmac) & 0xF)) +#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) (((map) >> 4) & 0xF) +#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) ((map) & 0xF) + u8 vf_lmac_map[MAX_LMAC]; + u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; + u64 mac[MAX_NUM_VFS_SUPPORTED]; + bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; + u8 link[MAX_LMAC]; + u8 duplex[MAX_LMAC]; + u32 speed[MAX_LMAC]; + bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; + u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; + u8 lmac_cnt; +}; + +/* PF <--> VF Mailbox communication + * Eight 64bit registers are shared between PF and VF. + * Separate set for each VF. + * Writing '1' into last register mbx7 means end of message. 
+ */ + +/* PF <--> VF mailbox communication */ +#define NIC_PF_VF_MAILBOX_SIZE 2 +#define NIC_PF_VF_MBX_TIMEOUT 2000 /* ms */ + +/* Mailbox message types */ +#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ +#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ +#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ +#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ +#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ +#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ +#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ +#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ +#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ +#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ +#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ +#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ +#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ +#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ +#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ +#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ +#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ +#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */ +#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */ +#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */ +#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ +#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ +#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ +#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ + +struct nic_cfg_msg { + u8 msg; + u8 vf_id; + u8 node_id; + bool tns_mode:1; + bool sqs_mode:1; + bool loopback_supported:1; + u8 mac_addr[6]; +}; + +/* Qset configuration */ +struct qs_cfg_msg { + u8 msg; + u8 num; + u8 sqs_count; + u64 cfg; +}; + +/* Receive queue configuration */ +struct rq_cfg_msg { + u8 msg; + u8 qs_num; + u8 rq_num; + u64 cfg; +}; + +/* Send queue configuration */ +struct sq_cfg_msg { + u8 msg; + u8 qs_num; + u8 sq_num; + bool sqs_mode; + u64 cfg; +}; + +/* Set VF's MAC address */ +struct set_mac_msg { + u8 msg; + u8 vf_id; + u8 mac_addr[6]; +}; + +/* Set Maximum frame size */ +struct set_frs_msg { + u8 msg; + u8 vf_id; + u16 max_frs; +}; + +/* Set CPI algorithm type */ +struct cpi_cfg_msg { + u8 msg; + u8 vf_id; + u8 rq_cnt; + u8 cpi_alg; +}; + +/* Get RSS table size */ +struct rss_sz_msg { + u8 msg; + u8 vf_id; + u16 ind_tbl_size; +}; + +/* Set RSS configuration */ +struct rss_cfg_msg { + u8 msg; + u8 vf_id; + u8 hash_bits; + u8 tbl_len; + u8 tbl_offset; +#define RSS_IND_TBL_LEN_PER_MBX_MSG 8 + u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; +}; + +struct bgx_stats_msg { + u8 msg; + u8 vf_id; + u8 rx; + u8 idx; + u64 stats; +}; + +/* Physical interface link status */ +struct bgx_link_status { + u8 msg; + u8 link_up; + u8 duplex; + u32 speed; +}; + +#ifdef VNIC_MULTI_QSET_SUPPORT +/* Get Extra Qset IDs */ +struct sqs_alloc { + u8 msg; + u8 vf_id; + u8 qs_count; +}; + +struct nicvf_ptr { + u8 msg; + u8 vf_id; + bool sqs_mode; + u8 sqs_id; + u64 nicvf; +}; +#endif + +/* Set interface in loopback mode */ +struct set_loopback { + u8 msg; + u8 vf_id; + bool enable; +}; + +/* 128 bit shared memory between PF and each VF */ +union nic_mbx { + struct { u8 msg; } msg; + struct nic_cfg_msg nic_cfg; + struct qs_cfg_msg qs; + struct rq_cfg_msg rq; + struct sq_cfg_msg sq; + struct set_mac_msg mac; + 
struct set_frs_msg	frs;
+	struct cpi_cfg_msg	cpi_cfg;
+	struct rss_sz_msg	rss_size;
+	struct rss_cfg_msg	rss_cfg;
+	struct bgx_stats_msg	bgx_stats;
+	struct bgx_link_status	link_status;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+	struct sqs_alloc	sqs_alloc;
+	struct nicvf_ptr	nicvf;
+#endif
+	struct set_loopback	lbk;
+};
+
+int nicvf_set_real_num_queues(struct udevice *dev,
+			      int tx_queues, int rx_queues);
+int nicvf_open(struct udevice *dev);
+void nicvf_stop(struct udevice *dev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_update_stats(struct nicvf *nic);
+
+void nic_handle_mbx_intr(struct nicpf *nic, int vf);
+
+int bgx_poll_for_link(int node, int bgx_idx, int lmacid);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable);
+
+static inline bool pass1_silicon(unsigned int revision, int model_id)
+{
+	return ((revision < 8) && (model_id == 0x88));
+}
+
+static inline bool pass2_silicon(unsigned int revision, int model_id)
+{
+	return ((revision >= 8) && (model_id == 0x88));
+}
+
+#endif /* NIC_H */
diff --git a/drivers/net/octeontx/nic_main.c b/drivers/net/octeontx/nic_main.c
new file mode 100644
index 00000000000..1a805f7a468
--- /dev/null
+++ b/drivers/net/octeontx/nic_main.c
@@ -0,0 +1,778 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <net.h>
+#include <netdev.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <dm.h>
+#include <misc.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+
+unsigned long rounddown_pow_of_two(unsigned long n)
+{
+	n |= n >> 1;
+	n |= n >> 2;
+	n |= n >> 4;
+	n |= n >> 8;
+	n |= n >> 16;
+	n |= n >> 32;
+
+	return (n + 1) >> 1;
+}
+
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+			       struct sq_cfg_msg *sq);
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
+static int nic_rcv_queue_sw_sync(struct nicpf *nic);
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+	writeq(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+	return readq(nic->reg_base + offset);
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+	void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
+	u64 *msg = (u64 *)mbx;
+
+	/* In first-revision HW, the mbox interrupt is triggered
+	 * when the PF writes to MBOX(1); in later revisions, when
+	 * the PF writes to MBOX(0)
+	 */
+	if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
+		/* see the comment for nic_reg_write()/nic_reg_read()
+		 * functions above
+		 */
+		writeq(msg[0], mbx_addr);
+		writeq(msg[1], mbx_addr + 8);
+	} else {
+		writeq(msg[1], mbx_addr + 8);
+		writeq(msg[0], mbx_addr);
+	}
+}
+
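Since union nic_mbx is exactly NIC_PF_VF_MAILBOX_SIZE (two) 64-bit words, the receiving side can reassemble a message by reading the same two registers back in order. A minimal sketch of such a reader, assuming readq() accessors and an already-mapped mailbox window (the helper name is illustrative, not from this patch):

static void mbx_read_msg(void __iomem *mbx_addr, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	/* msg[0] carries the message type in its low byte (mbx->msg.msg);
	 * msg[1] carries the rest of the payload.
	 */
	msg[0] = readq(mbx_addr);
	msg[1] = readq(mbx_addr + 8);
}
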
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+	int bgx_idx, lmac, timeout = 5, link = -1;
+	const u8 *mac;
+
+	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+	mbx.nic_cfg.vf_id = vf;
+
+	if (nic->flags & NIC_TNS_ENABLED)
+		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
+	else
+		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+	if (vf < nic->num_vf_en) {
+		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+		if (mac)
+			memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);
+
+		while (timeout-- && (link <= 0)) {
+			link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
+			debug("Link status: %d\n", link);
+			if (link <= 0)
+				mdelay(2000);
+		}
+	}
+#ifdef VNIC_MULTI_QSET_SUPPORT
+	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
+#endif
+	mbx.nic_cfg.node_id = nic->node;
+
+	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
+
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_ACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message when the PF is not able to
+ * complete the action
+ * @vf: VF to which NACK to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_NACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+	int bgx_idx, lmac_idx;
+
+	if (lbk->vf_id > nic->num_vf_en)
+		return -1;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+	return 0;
+}
+
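nic_config_loopback() and the mailbox handler below lean on the vf_lmac_map[] encoding from nic.h, which packs a BGX index into the high nibble and an LMAC index into the low nibble of one byte. A worked example of the round trip (example function only, assuming assert() is available in this environment):

static void vf_lmac_map_example(void)
{
	/* Example only: BGX1/LMAC2 encodes to (1 << 4) | 2 = 0x12 */
	u8 map = NIC_SET_VF_LMAC_MAP(1, 2);

	assert(NIC_GET_BGX_FROM_VF_LMAC_MAP(map) == 1);		/* high nibble */
	assert(NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) == 2);	/* low nibble */
}
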
+/* Interrupt handler to handle mailbox messages from VFs */
+void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+	u64 *mbx_data;
+	u64 mbx_addr;
+	u64 reg_addr;
+	u64 cfg;
+	int bgx, lmac;
+	int i;
+	int ret = 0;
+
+	nic->mbx_lock[vf] = true;
+
+	mbx_addr = nic_get_mbx_addr(vf);
+	mbx_data = (u64 *)&mbx;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nic_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(u64);
+	}
+
+	debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		nic_mbx_send_ready(nic, vf);
+		if (vf < nic->num_vf_en) {
+			nic->link[vf] = 0;
+			nic->duplex[vf] = 0;
+			nic->speed[vf] = 0;
+		}
+		ret = 1;
+		break;
+	case NIC_MBOX_MSG_QS_CFG:
+		reg_addr = NIC_PF_QSET_0_127_CFG |
+			   (mbx.qs.num << NIC_QS_ID_SHIFT);
+		cfg = mbx.qs.cfg;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+		/* Check if its a secondary Qset */
+		if (vf >= nic->num_vf_en) {
+			cfg = cfg & (~0x7FULL);
+			/* Assign this Qset to primary Qset's VF */
+			cfg |= nic->pqs_vf[vf];
+		}
+#endif
+		nic_reg_write(nic, reg_addr, cfg);
+		break;
+	case NIC_MBOX_MSG_RQ_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
+		 * This gets appended by default on 81xx/83xx chips; for
+		 * consistency, enable the same on 88xx pass2, where it
+		 * was introduced.
+		 */
+		if (pass2_silicon(nic->rev_id, nic->hw->model_id))
+			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+		break;
+	case NIC_MBOX_MSG_RQ_BP_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		break;
+	case NIC_MBOX_MSG_RQ_SW_SYNC:
+		ret = nic_rcv_queue_sw_sync(nic);
+		break;
+	case NIC_MBOX_MSG_RQ_DROP_CFG:
+		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		break;
+	case NIC_MBOX_MSG_SQ_CFG:
+		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+		nic_tx_channel_cfg(nic, mbx.qs.num,
+				   (struct sq_cfg_msg *)&mbx.sq);
+		break;
+	case NIC_MBOX_MSG_SET_MAC:
+#ifdef VNIC_MULTI_QSET_SUPPORT
+		if (vf >= nic->num_vf_en)
+			break;
+#endif
+		lmac = mbx.mac.vf_id;
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+		break;
+	case NIC_MBOX_MSG_SET_MAX_FRS:
+		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+					mbx.frs.vf_id);
+		break;
+	case NIC_MBOX_MSG_CPI_CFG:
+		nic_config_cpi(nic, &mbx.cpi_cfg);
+		break;
+#ifdef VNIC_RSS_SUPPORT
+	case NIC_MBOX_MSG_RSS_SIZE:
+		nic_send_rss_size(nic, vf);
+		goto unlock;
+	case NIC_MBOX_MSG_RSS_CFG:
+	case NIC_MBOX_MSG_RSS_CFG_CONT:
+		nic_config_rss(nic, &mbx.rss_cfg);
+		break;
+#endif
+	case NIC_MBOX_MSG_CFG_DONE:
+		/* Last message of VF config msg sequence */
+		nic->vf_enabled[vf] = true;
+		if (vf >= nic->lmac_cnt)
+			goto unlock;
+
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
+		goto unlock;
+	case NIC_MBOX_MSG_SHUTDOWN:
+		/* First msg in VF teardown sequence */
+		nic->vf_enabled[vf] = false;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+		if (vf >= nic->num_vf_en)
+			nic->sqs_used[vf - nic->num_vf_en] = false;
+		nic->pqs_vf[vf] = 0;
+#endif
+		if (vf >= nic->lmac_cnt)
+			break;
+
+		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
+		break;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+	case NIC_MBOX_MSG_ALLOC_SQS:
+		nic_alloc_sqs(nic, &mbx.sqs_alloc);
+		goto unlock;
+	case NIC_MBOX_MSG_NICVF_PTR:
+		nic->nicvf[vf] = mbx.nicvf.nicvf;
+		break;
+	case NIC_MBOX_MSG_PNICVF_PTR:
+		nic_send_pnicvf(nic, vf);
+		goto unlock;
+	case NIC_MBOX_MSG_SNICVF_PTR:
+		nic_send_snicvf(nic, &mbx.nicvf);
+		goto unlock;
+#endif
+	case NIC_MBOX_MSG_LOOPBACK:
+		ret = nic_config_loopback(nic, &mbx.lbk);
+		break;
+	default:
+		printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+		break;
+	}
+
+	if (!ret)
+		nic_mbx_send_ack(nic, vf);
+	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+		nic_mbx_send_nack(nic, vf);
+unlock:
+	nic->mbx_lock[vf] = false;
+}
+
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+	int timeout = 20;
+
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+	while (timeout) {
+		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+			break;
+		udelay(2000);
+		timeout--;
+	}
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+	if (!timeout) {
+		printf("Receive queue software sync failed\n");
+		return 1;
+	}
+	return 0;
+}
+
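nic_rcv_queue_sw_sync() is one instance of a recurring pattern in this file: kick a register, poll a status bit a bounded number of times, then clear the kick. Factored into a generic helper (a sketch under those assumptions, not part of this patch), the pattern reads:

static int nic_poll_reg_bit(struct nicpf *nic, u64 offset, u64 mask,
			    int retries, int delay_us)
{
	/* Return 0 once (reg & mask) is set, -1 if retries run out */
	while (retries--) {
		if (nic_reg_read(nic, offset) & mask)
			return 0;
		udelay(delay_us);
	}
	return -1;
}

With it, the sync above reduces to a write of 0x01, a call to nic_poll_reg_bit(nic, NIC_PF_SW_SYNC_RX_DONE, 0x1, 20, 2000), and a write of 0x00.
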
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+	u64 *pkind = (u64 *)&nic->pkind;
+
+	if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
+		printf("Invalid MTU setting from VF%d rejected,", vf);
+		printf(" should be between %d and %d\n", NIC_HW_MIN_FRS,
+		       NIC_HW_MAX_FRS);
+		return 1;
+	}
+	new_frs += ETH_HLEN;
+	if (new_frs <= nic->pkind.maxlen)
+		return 0;
+
+	nic->pkind.maxlen = new_frs;
+
+	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
+	return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+	int lmac;
+	u64 lmac_cfg;
+	struct hw_info *hw = nic->hw;
+	int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+	/* Max value that can be set is 60 */
+	if (size > 52)
+		size = 52;
+
+	/* CN81XX has RGX configured as FAKE BGX, adjust max_lmac accordingly */
+	if (hw->chans_per_rgx)
+		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+
+	for (lmac = 0; lmac < max_lmac; lmac++) {
+		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+		lmac_cfg &= ~(0xF << 2);
+		lmac_cfg |= ((size / 4) << 2);
+		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+	}
+}
+
+/* Function to check number of LMACs present and set VF to LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+	int bgx, bgx_count, next_bgx_lmac = 0;
+	int lmac, lmac_cnt = 0;
+	u64 lmac_credit;
+
+	nic->num_vf_en = 0;
+	if (nic->flags & NIC_TNS_ENABLED) {
+		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
+		return;
+	}
+
+	bgx_get_count(nic->node, &bgx_count);
+	debug("bgx_count: %d\n", bgx_count);
+
+	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
+		if (!(bgx_count & (1 << bgx)))
+			continue;
+		nic->bgx_cnt++;
+		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+		debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
+		for (lmac = 0; lmac < lmac_cnt; lmac++)
+			nic->vf_lmac_map[next_bgx_lmac++] =
+						NIC_SET_VF_LMAC_MAP(bgx, lmac);
+		nic->num_vf_en += lmac_cnt;
+
+		/* Program LMAC credits */
+		lmac_credit = (1ull << 1);	/* channel credit enable */
+		lmac_credit |= (0x1ff << 2);
+		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+				NIC_HW_MAX_FRS) / 16) << 12);
+		lmac = bgx * MAX_LMAC_PER_BGX;
+		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+				      lmac_credit);
+	}
+}
+
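The credit value programmed in nic_set_lmac_vf_mapping() splits a 48 KB transmit buffer evenly across the LMACs of a BGX and expresses the per-LMAC remainder in 16-byte units. For a fully populated BGX (lmac_cnt = 4) the arithmetic works out as follows:

/* Worked example of the lmac_credit value above for lmac_cnt = 4:
 *
 *   per-LMAC share    : (48 * 1024) / 4        = 12288 bytes
 *   minus one max FRS : 12288 - NIC_HW_MAX_FRS = 12288 - 9200 = 3088
 *   in 16-byte units  : 3088 / 16              = 193
 *
 * The register value is then bit 1 (channel credit enable), the
 * constant 0x1ff in bits 2-10, and 193 in the field at bit 12.
 */
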
+static void nic_get_hw_info(struct nicpf *nic)
+{
+	u16 sdevid;
+	struct hw_info *hw = nic->hw;
+
+	dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
+
+	switch (sdevid) {
+	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_NODE;
+		hw->chans_per_lmac = 16;
+		hw->chans_per_bgx = 128;
+		hw->cpi_cnt = 2048;
+		hw->rssi_cnt = 4096;
+		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+		hw->tl3_cnt = 256;
+		hw->tl2_cnt = 64;
+		hw->tl1_cnt = 2;
+		hw->tl1_per_bgx = true;
+		hw->model_id = 0x88;
+		break;
+	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_NODE;
+		hw->chans_per_lmac = 8;
+		hw->chans_per_bgx = 32;
+		hw->chans_per_rgx = 8;
+		hw->chans_per_lbk = 24;
+		hw->cpi_cnt = 512;
+		hw->rssi_cnt = 256;
+		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+		hw->tl3_cnt = 64;
+		hw->tl2_cnt = 16;
+		hw->tl1_cnt = 10;
+		hw->tl1_per_bgx = false;
+		hw->model_id = 0x81;
+		break;
+	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+		hw->bgx_cnt = MAX_BGX_PER_NODE;
+		hw->chans_per_lmac = 8;
+		hw->chans_per_bgx = 32;
+		hw->chans_per_lbk = 64;
+		hw->cpi_cnt = 2048;
+		hw->rssi_cnt = 1024;
+		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+		hw->tl3_cnt = 256;
+		hw->tl2_cnt = 64;
+		hw->tl1_cnt = 18;
+		hw->tl1_per_bgx = false;
+		hw->model_id = 0x83;
+		break;
+	}
+
+	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
+}
+
+static void nic_init_hw(struct nicpf *nic)
+{
+	int i;
+	u64 reg;
+	u64 *pkind = (u64 *)&nic->pkind;
+
+	/* Get HW capability info */
+	nic_get_hw_info(nic);
+
+	/* Enable NIC HW block */
+	nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+	/* Enable backpressure */
+	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
+	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+		      (1ULL << 63) | 0x09);
+
+	for (i = 0; i < NIC_MAX_CHANS; i++)
+		nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);
+
+	if (nic->flags & NIC_TNS_ENABLED) {
+		reg = NIC_TNS_MODE << 7;
+		reg |= 0x06;
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
+		reg &= ~0xFull;
+		reg |= 0x07;
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
+	} else {
+		/* Disable TNS mode on both interfaces */
+		reg = NIC_TNS_BYPASS_MODE << 7;
+		reg |= 0x08; /* Block identifier */
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
+		reg &= ~0xFull;
+		reg |= 0x09;
+		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
+	}
+
+	/* PKIND configuration */
+	nic->pkind.minlen = 0;
+	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+	nic->pkind.lenerr_en = 1;
+	nic->pkind.rx_hdr = 0;
+	nic->pkind.hdr_sl = 0;
+
+	for (i = 0; i < NIC_MAX_PKIND; i++)
+		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);
+
+	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+	/* Timer config */
+	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+	struct hw_info *hw = nic->hw;
+	u32 vnic, bgx, lmac, chan;
+	u32 padd, cpi_count = 0;
+	u64 cpi_base, cpi, rssi_base, rssi;
+	u8 qset, rq_idx = 0;
+
+	vnic = cfg->vf_id;
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+	rssi_base = vnic * hw->rss_ind_tbl_size;
+
+	/* Rx channel configuration */
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+		      (1ull << 63) | (vnic << 0));
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+	if (cfg->cpi_alg == CPI_ALG_NONE)
+		cpi_count = 1;
+	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+		cpi_count = 8;
+	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+		cpi_count = 16;
+	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+		cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+	/* RSS Qset, Qidx mapping */
+	qset = cfg->vf_id;
+	rssi = rssi_base;
+	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+			      (qset << 3) | rq_idx);
+		rq_idx++;
+	}
+
+	rssi = 0;
+	cpi = cpi_base;
+	for (; cpi < (cpi_base + cpi_count); cpi++) {
+		/* Determine port to channel adder */
+		if (cfg->cpi_alg != CPI_ALG_DIFF)
+			padd = cpi % cpi_count;
+		else
+			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+		/* Leave RSS_SIZE as '0' to disable RSS */
+		if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
+			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+				      (vnic << 24) | (padd << 16) |
+				      (rssi_base + rssi));
+		} else {
+			/* Set MPI_ALG to '0' to disable MCAM parsing */
+			
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), + (padd << 16)); + /* MPI index is same as CPI if MPI_ALG is not enabled */ + nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3), + (vnic << 24) | (rssi_base + rssi)); + } + + if ((rssi + 1) >= cfg->rq_cnt) + continue; + + if (cfg->cpi_alg == CPI_ALG_VLAN) + rssi++; + else if (cfg->cpi_alg == CPI_ALG_VLAN16) + rssi = ((cpi - cpi_base) & 0xe) >> 1; + else if (cfg->cpi_alg == CPI_ALG_DIFF) + rssi = ((cpi - cpi_base) & 0x38) >> 3; + } + nic->cpi_base[cfg->vf_id] = cpi_base; + nic->rssi_base[cfg->vf_id] = rssi_base; +} + +/* Transmit channel configuration (TL4 -> TL3 -> Chan) + * VNIC0-SQ0 -> TL4(0) -> TL4A(0) -> TL3[0] -> BGX0/LMAC0/Chan0 + * VNIC1-SQ0 -> TL4(8) -> TL4A(2) -> TL3[2] -> BGX0/LMAC1/Chan0 + * VNIC2-SQ0 -> TL4(16) -> TL4A(4) -> TL3[4] -> BGX0/LMAC2/Chan0 + * VNIC3-SQ0 -> TL4(32) -> TL4A(6) -> TL3[6] -> BGX0/LMAC3/Chan0 + * VNIC4-SQ0 -> TL4(512) -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0 + * VNIC5-SQ0 -> TL4(520) -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0 + * VNIC6-SQ0 -> TL4(528) -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0 + * VNIC7-SQ0 -> TL4(536) -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0 + */ +static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, + struct sq_cfg_msg *sq) +{ + struct hw_info *hw = nic->hw; + u32 bgx, lmac, chan; + u32 tl2, tl3, tl4; + u32 rr_quantum; + u8 sq_idx = sq->sq_num; + u8 pqs_vnic = vnic; + int svf; + u16 sdevid; + + dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid); + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]); + + /* 24 bytes for FCS, IPG and preamble */ + rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); + + /* For 88xx 0-511 TL4 transmits via BGX0 and + * 512-1023 TL4s transmit via BGX1. + */ + if (hw->tl1_per_bgx) { + tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt); + if (!sq->sqs_mode) { + tl4 += (lmac * MAX_QUEUES_PER_QSET); + } else { + for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) { + if (nic->vf_sqs[pqs_vnic][svf] == vnic) + break; + } + tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET); + tl4 += (lmac * MAX_QUEUES_PER_QSET * + MAX_SQS_PER_VF_SINGLE_NODE); + tl4 += (svf * MAX_QUEUES_PER_QSET); + } + } else { + tl4 = (vnic * MAX_QUEUES_PER_QSET); + } + + tl4 += sq_idx; + + tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt); + nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | + ((u64)vnic << NIC_QS_ID_SHIFT) | + ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); + nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3), + ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); + + nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); + + /* On 88xx 0-127 channels are for BGX0 and + * 127-255 channels for BGX1. 
+ * + * On 81xx/83xx TL3_CHAN reg should be configured with channel + * within LMAC i.e 0-7 and not the actual channel number like on 88xx + */ + chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx); + if (hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + else + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0); + + /* Enable backpressure on the channel */ + nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); + + tl2 = tl3 >> 2; + nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2); + nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); + /* No priorities as of now */ + nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); + + /* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1' + * on 81xx/83xx TL2 needs to be configured to transmit to one of the + * possible LMACs. + * + * This register doesn't exist on 88xx. + */ + if (!hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3), + lmac + (bgx * MAX_LMAC_PER_BGX)); +} + +int nic_initialize(struct udevice *dev) +{ + struct nicpf *nic = dev_get_priv(dev); + + nic->udev = dev; + nic->hw = calloc(1, sizeof(struct hw_info)); + if (!nic->hw) + return -ENOMEM; + + /* MAP PF's configuration registers */ + nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, + PCI_REGION_MEM); + if (!nic->reg_base) { + printf("Cannot map config register space, aborting\n"); + goto exit; + } + + nic->node = node_id(nic->reg_base); + dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id); + + /* By default set NIC in TNS bypass mode */ + nic->flags &= ~NIC_TNS_ENABLED; + + /* Initialize hardware */ + nic_init_hw(nic); + + nic_set_lmac_vf_mapping(nic); + + /* Set RSS TBL size for each VF */ + nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + + nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size); + + return 0; +exit: + free(nic->hw); + return -ENODEV; +} + +int octeontx_nic_probe(struct udevice *dev) +{ + int ret = 0; + struct nicpf *nicpf = dev_get_priv(dev); + + nicpf->udev = dev; + ret = nic_initialize(dev); + if (ret < 0) { + printf("couldn't initialize NIC PF\n"); + return ret; + } + + ret = pci_sriov_init(dev, nicpf->num_vf_en); + if (ret < 0) + printf("enabling SRIOV failed for num VFs %d\n", + nicpf->num_vf_en); + + return ret; +} + +U_BOOT_DRIVER(octeontx_nic) = { + .name = "octeontx_nic", + .id = UCLASS_MISC, + .probe = octeontx_nic_probe, + .priv_auto_alloc_size = sizeof(struct nicpf), +}; + +static struct pci_device_id octeontx_nic_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) }, + {} +}; + +U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported); + diff --git a/drivers/net/octeontx/nic_reg.h b/drivers/net/octeontx/nic_reg.h new file mode 100644 index 00000000000..c214ebb6799 --- /dev/null +++ b/drivers/net/octeontx/nic_reg.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. 
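+ */

The U_BOOT_DRIVER plus U_BOOT_PCI_DEVICE pairing above is the standard driver-model pattern for PCI hardware: the ID table drives binding at enumeration time, and .probe runs when the device is activated. A minimal sketch of the same pattern for a hypothetical device (the names and the 0xa01e ID are made up for illustration):

	static struct pci_device_id my_dev_supported[] = {
		{ PCI_VDEVICE(CAVIUM, 0xa01e) },	/* hypothetical ID */
		{}
	};

	U_BOOT_DRIVER(my_dev) = {
		.name	= "my_dev",
		.id	= UCLASS_MISC,
		.probe	= my_dev_probe,
	};

	U_BOOT_PCI_DEVICE(my_dev, my_dev_supported);

Note that the PF registers as UCLASS_MISC: it never sends or receives frames itself, it only configures the shared hardware on behalf of the UCLASS_ETH VF driver that follows.

+/* nic_reg.h: register offsets for the OcteonTX NIC PF and VF BAR0 spaces.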
+ */ + +#ifndef NIC_REG_H +#define NIC_REG_H + +#define NIC_PF_REG_COUNT 29573 +#define NIC_VF_REG_COUNT 249 + +/* Physical function register offsets */ +#define NIC_PF_CFG (0x0000) +#define NIC_PF_STATUS (0x0010) + +#define NIC_PF_INTR_TIMER_CFG (0x0030) +#define NIC_PF_BIST_STATUS (0x0040) +#define NIC_PF_SOFT_RESET (0x0050) + +#define NIC_PF_TCP_TIMER (0x0060) +#define NIC_PF_BP_CFG (0x0080) +#define NIC_PF_RRM_CFG (0x0088) +#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CNM_CF (0x00A8) +#define NIC_PF_CNM_STATUS (0x00B0) +#define NIC_PF_CQ_AVG_CFG (0x00C0) +#define NIC_PF_RRM_AVG_CFG (0x00C8) + +#define NIC_PF_INTF_0_1_SEND_CFG (0x0200) +#define NIC_PF_INTF_0_1_BP_CFG (0x0208) +#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210) +#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220) +#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240) + +#define NIC_PF_MAILBOX_INT (0x0410) +#define NIC_PF_MAILBOX_INT_W1S (0x0430) +#define NIC_PF_MAILBOX_ENA_W1C (0x0450) +#define NIC_PF_MAILBOX_ENA_W1S (0x0470) + +#define NIC_PF_RX_ETYPE_0_7 (0x0500) +#define NIC_PF_RX_CFG (0x05D0) +#define NIC_PF_PKIND_0_15_CFG (0x0600) + +#define NIC_PF_ECC0_FLIP0 (0x1000) +#define NIC_PF_ECC1_FLIP0 (0x1008) +#define NIC_PF_ECC2_FLIP0 (0x1010) +#define NIC_PF_ECC3_FLIP0 (0x1018) +#define NIC_PF_ECC0_FLIP1 (0x1080) +#define NIC_PF_ECC1_FLIP1 (0x1088) +#define NIC_PF_ECC2_FLIP1 (0x1090) +#define NIC_PF_ECC3_FLIP1 (0x1098) +#define NIC_PF_ECC0_CDIS (0x1100) +#define NIC_PF_ECC1_CDIS (0x1108) +#define NIC_PF_ECC2_CDIS (0x1110) +#define NIC_PF_ECC3_CDIS (0x1118) +#define NIC_PF_BIST0_STATUS (0x1280) +#define NIC_PF_BIST1_STATUS (0x1288) +#define NIC_PF_BIST2_STATUS (0x1290) +#define NIC_PF_BIST3_STATUS (0x1298) + +#define NIC_PF_ECC0_SBE_INT (0x2000) +#define NIC_PF_ECC0_SBE_INT_W1S (0x2008) +#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010) +#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018) +#define NIC_PF_ECC0_DBE_INT (0x2100) +#define NIC_PF_ECC0_DBE_INT_W1S (0x2108) +#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110) +#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118) + +#define NIC_PF_ECC1_SBE_INT (0x2200) +#define NIC_PF_ECC1_SBE_INT_W1S (0x2208) +#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210) +#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218) +#define NIC_PF_ECC1_DBE_INT (0x2300) +#define NIC_PF_ECC1_DBE_INT_W1S (0x2308) +#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310) +#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318) + +#define NIC_PF_ECC2_SBE_INT (0x2400) +#define NIC_PF_ECC2_SBE_INT_W1S (0x2408) +#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410) +#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418) +#define NIC_PF_ECC2_DBE_INT (0x2500) +#define NIC_PF_ECC2_DBE_INT_W1S (0x2508) +#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510) +#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518) + +#define NIC_PF_ECC3_SBE_INT (0x2600) +#define NIC_PF_ECC3_SBE_INT_W1S (0x2608) +#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610) +#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618) +#define NIC_PF_ECC3_DBE_INT (0x2700) +#define NIC_PF_ECC3_DBE_INT_W1S (0x2708) +#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) +#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) + +#define NIC_PF_CPI_0_2047_CFG (0x200000) +#define NIC_PF_MPI_0_2047_CFG (0x210000) +#define NIC_PF_RSSI_0_4097_RQ (0x220000) +#define NIC_PF_LMAC_0_7_CFG (0x240000) +#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) +#define NIC_PF_LMAC_0_7_CREDIT (0x244000) +#define NIC_PF_CHAN_0_255_TX_CFG (0x400000) +#define NIC_PF_CHAN_0_255_RX_CFG (0x420000) +#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000) +#define NIC_PF_CHAN_0_255_CREDIT (0x460000) +#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000) + +#define NIC_PF_SW_SYNC_RX (0x490000) + +#define 
NIC_PF_SW_SYNC_RX_DONE (0x490008) +#define NIC_PF_TL2_0_63_CFG (0x500000) +#define NIC_PF_TL2_0_63_PRI (0x520000) +#define NIC_PF_TL2_LMAC (0x540000) +#define NIC_PF_TL2_0_63_SH_STATUS (0x580000) +#define NIC_PF_TL3A_0_63_CFG (0x5F0000) +#define NIC_PF_TL3_0_255_CFG (0x600000) +#define NIC_PF_TL3_0_255_CHAN (0x620000) +#define NIC_PF_TL3_0_255_PIR (0x640000) +#define NIC_PF_TL3_0_255_SW_XOFF (0x660000) +#define NIC_PF_TL3_0_255_CNM_RATE (0x680000) +#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000) +#define NIC_PF_TL4A_0_255_CFG (0x6F0000) +#define NIC_PF_TL4_0_1023_CFG (0x800000) +#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000) +#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000) +#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000) +#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000) + +#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030) +#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000) +#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100) +#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000) +#define NIC_PF_QSET_0_127_CFG (0x20010000) +#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400) +#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420) +#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500) +#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08) +#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00) + +#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000) +#define NIC_PF_MSIX_VEC_0_CTL (0x000008) +#define NIC_PF_MSIX_PBA_0 (0x0F0000) + +/* Virtual function register offsets */ +#define NIC_VNIC_CFG (0x000020) +#define NIC_VF_PF_MAILBOX_0_1 (0x000130) +#define NIC_VF_INT (0x000200) +#define NIC_VF_INT_W1S (0x000220) +#define NIC_VF_ENA_W1C (0x000240) +#define NIC_VF_ENA_W1S (0x000260) + +#define NIC_VNIC_RSS_CFG (0x0020E0) +#define NIC_VNIC_RSS_KEY_0_4 (0x002200) +#define NIC_VNIC_TX_STAT_0_4 (0x004000) +#define NIC_VNIC_RX_STAT_0_13 (0x004100) +#define NIC_QSET_RQ_GEN_CFG (0x010010) + +#define NIC_QSET_CQ_0_7_CFG (0x010400) +#define NIC_QSET_CQ_0_7_CFG2 (0x010408) +#define NIC_QSET_CQ_0_7_THRESH (0x010410) +#define NIC_QSET_CQ_0_7_BASE (0x010420) +#define NIC_QSET_CQ_0_7_HEAD (0x010428) +#define NIC_QSET_CQ_0_7_TAIL (0x010430) +#define NIC_QSET_CQ_0_7_DOOR (0x010438) +#define NIC_QSET_CQ_0_7_STATUS (0x010440) +#define NIC_QSET_CQ_0_7_STATUS2 (0x010448) +#define NIC_QSET_CQ_0_7_DEBUG (0x010450) + +#define NIC_QSET_RQ_0_7_CFG (0x010600) +#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700) + +#define NIC_QSET_SQ_0_7_CFG (0x010800) +#define NIC_QSET_SQ_0_7_THRESH (0x010810) +#define NIC_QSET_SQ_0_7_BASE (0x010820) +#define NIC_QSET_SQ_0_7_HEAD (0x010828) +#define NIC_QSET_SQ_0_7_TAIL (0x010830) +#define NIC_QSET_SQ_0_7_DOOR (0x010838) +#define NIC_QSET_SQ_0_7_STATUS (0x010840) +#define NIC_QSET_SQ_0_7_DEBUG (0x010848) +#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) +#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) + +#define NIC_QSET_RBDR_0_1_CFG (0x010C00) +#define NIC_QSET_RBDR_0_1_THRESH (0x010C10) +#define NIC_QSET_RBDR_0_1_BASE (0x010C20) +#define NIC_QSET_RBDR_0_1_HEAD (0x010C28) +#define NIC_QSET_RBDR_0_1_TAIL (0x010C30) +#define NIC_QSET_RBDR_0_1_DOOR (0x010C38) +#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40) +#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48) +#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50) + +#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000) +#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008) +#define NIC_VF_MSIX_PBA (0x0F0000) + +/* Offsets within registers */ +#define NIC_MSIX_VEC_SHIFT 4 +#define NIC_Q_NUM_SHIFT 18 
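These shifts (together with NIC_QS_ID_SHIFT just below) describe how a queue's identity is folded into the register offset itself rather than passed in a register field. A sketch of the addressing scheme (editor's illustration, not part of the patch):

	/* VF side: the queue number selects one 256 KB-aligned register copy */
	addr = reg_base + NIC_QSET_SQ_0_7_CFG + (qidx << NIC_Q_NUM_SHIFT);

	/* PF side: Qset (VF) number at bit 21, queue number at bit 18 */
	reg = NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	      ((u64)vnic << NIC_QS_ID_SHIFT) | ((u64)sq_idx << NIC_Q_NUM_SHIFT);

This is exactly what nicvf_queue_reg_read()/nicvf_queue_reg_write() in nicvf_main.c below do.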
+#define NIC_QS_ID_SHIFT		21
+#define NIC_VF_NUM_SHIFT	21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	uint64_t reserved_42_63:22;
+	uint64_t hdr_sl:5;	/* Header skip length */
+	uint64_t rx_hdr:3;	/* TNS Receive header present */
+	uint64_t lenerr_en:1;	/* L2 length error check enable */
+	uint64_t reserved_32_32:1;
+	uint64_t maxlen:16;	/* Max frame size */
+	uint64_t minlen:16;	/* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	uint64_t minlen:16;
+	uint64_t maxlen:16;
+	uint64_t reserved_32_32:1;
+	uint64_t lenerr_en:1;
+	uint64_t rx_hdr:3;
+	uint64_t hdr_sl:5;
+	uint64_t reserved_42_63:22;
+#endif
+};
+
+static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
+	__attribute__ ((pure, always_inline));
+static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
+{
+	assert(param1 <= 1);
+	return 0x87E0E0000000 + (param1 << 24);
+}
+
+#define BGXX_PF_BAR0_SIZE	0x400000
+#define NIC_PF_BAR0		0x843000000000
+#define NIC_PF_BAR0_SIZE	0x40000000
+
+static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
+	__attribute__ ((pure, always_inline));
+static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
+{
+	assert(param1 <= 127);
+
+	return 0x8430A0000000 + (param1 << 21);
+}
+
+#define NIC_VFX_BAR0_SIZE	0x200000
+
+#endif /* NIC_REG_H */ diff --git a/drivers/net/octeontx/nicvf_main.c b/drivers/net/octeontx/nicvf_main.c new file mode 100644 index 00000000000..e13c8b95569 --- /dev/null +++ b/drivers/net/octeontx/nicvf_main.c @@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <phy.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+	writeq(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+	return readq(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val)
+{
+	void *addr = nic->reg_base + offset;
+
+	writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+	void *addr = nic->reg_base + offset;
+
+	return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic);
+
+/* VF -> PF mailbox communication */
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+	u64 *msg = (u64 *)mbx;
+
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+	int timeout = NIC_PF_VF_MBX_TIMEOUT;
+	int sleep = 10;
+
+	nic->pf_acked = false;
+	nic->pf_nacked = false;
+
+	nicvf_write_to_mbx(nic, mbx);
+
+	nic_handle_mbx_intr(nic->nicpf, nic->vf_id);
+
+	/* Wait for previous message to be acked, timeout 2sec */
+	while (!nic->pf_acked) {
+		if (nic->pf_nacked)
+			return -1;
+		mdelay(sleep);
+		nicvf_handle_mbx_intr(nic);
+
+		if (nic->pf_acked)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			printf("PF didn't ack to mbox msg %d from VF%d\n",
+			       (mbx->msg.msg & 0xFF), nic->vf_id);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
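+ */

Since U-Boot runs single-threaded with interrupts off, the mailbox exchange above is fully synchronous: the VF writes the two mailbox words, calls straight into the PF side (nic_handle_mbx_intr) instead of raising an interrupt, and then polls its own mailbox for the ACK/NACK. Every request follows the same shape; a minimal sketch (editor's illustration, mirroring nicvf_hw_set_mac_addr() further down):

	union nic_mbx mbx = {};			/* unused words must be zero */

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;	/* request type in first byte */
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, enetaddr, 6);
	if (nicvf_send_msg_to_pf(nic, &mbx))	/* blocks until ACK or timeout */
		return -1;

+/* Returns 1 when the PF answers the READY message, 0 otherwise.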
+ */ +static int nicvf_check_pf_ready(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_READY; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + printf("PF didn't respond to READY msg\n"); + return 0; + } + + return 1; +} + +static void nicvf_handle_mbx_intr(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + struct eth_pdata *pdata = dev_get_platdata(nic->dev); + u64 *mbx_data; + u64 mbx_addr; + int i; + + mbx_addr = NIC_VF_PF_MAILBOX_0_1; + mbx_data = (u64 *)&mbx; + + for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { + *mbx_data = nicvf_reg_read(nic, mbx_addr); + mbx_data++; + mbx_addr += sizeof(u64); + } + + debug("Mbox message: msg: 0x%x\n", mbx.msg.msg); + switch (mbx.msg.msg) { + case NIC_MBOX_MSG_READY: + nic->pf_acked = true; + nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; + nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; + nic->node = mbx.nic_cfg.node_id; + if (!nic->set_mac_pending) + memcpy(pdata->enetaddr, + mbx.nic_cfg.mac_addr, 6); + nic->loopback_supported = mbx.nic_cfg.loopback_supported; + nic->link_up = false; + nic->duplex = 0; + nic->speed = 0; + break; + case NIC_MBOX_MSG_ACK: + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_NACK: + nic->pf_nacked = true; + break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + nic->pf_acked = true; + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + if (nic->link_up) { + printf("%s: Link is Up %d Mbps %s\n", + nic->dev->name, nic->speed, + nic->duplex == 1 ? + "Full duplex" : "Half duplex"); + } else { + printf("%s: Link is Down\n", nic->dev->name); + } + break; + default: + printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg); + break; + } + + nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); +} + +static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev) +{ + union nic_mbx mbx = {}; + struct eth_pdata *pdata = dev_get_platdata(dev); + + mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; + mbx.mac.vf_id = nic->vf_id; + memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6); + + return nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_config_cpi(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; + mbx.cpi_cfg.vf_id = nic->vf_id; + mbx.cpi_cfg.cpi_alg = nic->cpi_alg; + mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; + + nicvf_send_msg_to_pf(nic, &mbx); +} + +static int nicvf_init_resources(struct nicvf *nic) +{ + int err; + + nic->num_qs = 1; + + /* Enable Qset */ + nicvf_qset_config(nic, true); + + /* Initialize queues and HW for data transfer */ + err = nicvf_config_data_transfer(nic, true); + + if (err) { + printf("Failed to alloc/config VF's QSet resources\n"); + return err; + } + return 0; +} + +static void nicvf_snd_pkt_handler(struct nicvf *nic, + struct cmp_queue *cq, + void *cq_desc, int cqe_type) +{ + struct cqe_send_t *cqe_tx; + struct snd_queue *sq; + struct sq_hdr_subdesc *hdr; + + cqe_tx = (struct cqe_send_t *)cq_desc; + sq = &nic->qs->sq[cqe_tx->sq_idx]; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); + if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) + return; + + nicvf_check_cqe_tx_errs(nic, cq, cq_desc); + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); +} + +static int nicvf_rcv_pkt_handler(struct nicvf *nic, + struct cmp_queue *cq, void *cq_desc, + void **ppkt, int cqe_type) +{ + void *pkt; + + size_t pkt_len; + struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc; + int err = 0; + + /* Check for errors */ + err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc); + if (err && !cqe_rx->rb_cnt) + return -1; + + 
pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len); + if (!pkt) { + debug("Packet not received\n"); + return -1; + } + + if (pkt) + *ppkt = pkt; + + return pkt_len; +} + +int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len) +{ + int cq_qnum = 0; + int processed_sq_cqe = 0; + int processed_rq_cqe = 0; + int processed_cqe = 0; + + unsigned long cqe_count, cqe_head; + struct queue_set *qs = nic->qs; + struct cmp_queue *cq = &qs->cq[cq_qnum]; + struct cqe_rx_t *cq_desc; + + /* Get num of valid CQ entries expect next one to be SQ completion */ + cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum); + cqe_count &= 0xFFFF; + if (!cqe_count) + return 0; + + /* Get head of the valid CQ entries */ + cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum); + cqe_head >>= 9; + cqe_head &= 0xFFFF; + + if (cqe_count) { + /* Get the CQ descriptor */ + cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); + cqe_head++; + cqe_head &= (cq->dmem.q_len - 1); + /* Initiate prefetch for next descriptor */ + prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); + + switch (cq_desc->cqe_type) { + case CQE_TYPE_RX: + debug("%s: Got Rx CQE\n", nic->dev->name); + *pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc, + ppkt, CQE_TYPE_RX); + processed_rq_cqe++; + break; + case CQE_TYPE_SEND: + debug("%s: Got Tx CQE\n", nic->dev->name); + nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND); + processed_sq_cqe++; + break; + default: + debug("%s: Got CQ type %u\n", nic->dev->name, + cq_desc->cqe_type); + break; + } + processed_cqe++; + } + + /* Dequeue CQE */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, + cq_qnum, processed_cqe); + + asm volatile ("dsb sy"); + + return (processed_sq_cqe | processed_rq_cqe); +} + +/* Qset error interrupt handler + * + * As of now only CQ errors are handled + */ +void nicvf_handle_qs_err(struct nicvf *nic) +{ + struct queue_set *qs = nic->qs; + int qidx; + u64 status; + + /* Check if it is CQ err */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, + qidx); + if (!(status & CQ_ERR_MASK)) + continue; + /* Process already queued CQEs and reconfig CQ */ + nicvf_sq_disable(nic, qidx); + nicvf_cmp_queue_config(nic, qs, qidx, true); + nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx); + nicvf_sq_enable(nic, &qs->sq[qidx], qidx); + } +} + +static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len) +{ + struct nicvf *nic = dev_get_priv(dev); + + if (pkt && pkt_len) + free(pkt); + nicvf_refill_rbdr(nic); + return 0; +} + +static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len) +{ + struct nicvf *nic = dev_get_priv(dev); + int ret = 0; + int rcv_len = 0; + unsigned int timeout = 5000; + void *rpkt = NULL; + + if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) { + printf("VF%d: TX ring full\n", nic->vf_id); + return -1; + } + + /* check and update CQ for pkt sent */ + while (!ret && timeout--) { + ret = nicvf_cq_handler(nic, &rpkt, &rcv_len); + if (!ret) { + debug("%s: %d, Not sent\n", __func__, __LINE__); + udelay(10); + } + } + + return 0; +} + +static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp) +{ + struct nicvf *nic = dev_get_priv(dev); + void *pkt; + int pkt_len = 0; +#ifdef DEBUG + u8 *dpkt; + int i, j; +#endif + + nicvf_cq_handler(nic, &pkt, &pkt_len); + + if (pkt_len) { +#ifdef DEBUG + dpkt = pkt; + printf("RX packet contents:\n"); + for (i = 0; i < 8; i++) { + puts("\t"); + for (j = 0; j < 10; j++) + printf("%02x ", dpkt[i * 10 + j]); + 
puts("\n");
+		}
+#endif
+		*packetp = pkt;
+	}
+
+	return pkt_len;
+}
+
+void nicvf_stop(struct udevice *dev)
+{
+	struct nicvf *nic = dev_get_priv(dev);
+
+	if (!nic->open)
+		return;
+
+	/* Free resources */
+	nicvf_config_data_transfer(nic, false);
+
+	/* Disable HW Qset */
+	nicvf_qset_config(nic, false);
+
+	nic->open = false;
+}
+
+int nicvf_open(struct udevice *dev)
+{
+	int err;
+	struct nicvf *nic = dev_get_priv(dev);
+
+	nicvf_hw_set_mac_addr(nic, dev);
+
+	/* Configure CPI algorithm */
+	nic->cpi_alg = CPI_ALG_NONE;
+	nicvf_config_cpi(nic);
+
+	/* Initialize the queues */
+	err = nicvf_init_resources(nic);
+	if (err)
+		return -1;
+
+	if (!nicvf_check_pf_ready(nic))
+		return -1;
+
+	nic->open = true;
+
+	/* Make sure queue initialization is written */
+	asm volatile("dsb sy");
+
+	return 0;
+}
+
+int nicvf_write_hwaddr(struct udevice *dev)
+{
+	unsigned char ethaddr[ARP_HLEN];
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+	struct nicvf *nic = dev_get_priv(dev);
+
+	/* If the lower-level firmware fails to set a proper MAC, the
+	 * U-Boot framework updates it to a random address. Use this
+	 * hook to keep the MAC address in the environment in sync.
+	 */
+	if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) {
+		eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr);
+		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+	}
+	eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr);
+	if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
+		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+		nicvf_hw_set_mac_addr(nic, dev);
+	}
+	return 0;
+}
+
+static void nicvf_probe_mdio_devices(void)
+{
+	struct udevice *pdev;
+	int err;
+	static int probed;
+
+	if (probed)
+		return;
+
+	err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+				 PCI_DEVICE_ID_CAVIUM_SMI, 0,
+				 &pdev);
+	if (err)
+		debug("%s couldn't find SMI device\n", __func__);
+	probed = 1;
+}
+
+int nicvf_initialize(struct udevice *dev)
+{
+	struct nicvf *nicvf = dev_get_priv(dev);
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+	int ret = 0, bgx, lmac;
+	char name[16];
+	unsigned char ethaddr[ARP_HLEN];
+	struct udevice *pfdev;
+	struct nicpf *pf;
+	static int vfid;
+
+	if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+			       PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
+		printf("%s NIC PF device not found; VF probe failed\n",
+		       __func__);
+		return -1;
+	}
+	pf = dev_get_priv(pfdev);
+	nicvf->vf_id = vfid++;
+	nicvf->dev = dev;
+	nicvf->nicpf = pf;
+
+	nicvf_probe_mdio_devices();
+
+	/* Enable TSO support */
+	nicvf->hw_tso = true;
+
+	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+					 PCI_REGION_MEM);
+
+	debug("nicvf->reg_base: %p\n", nicvf->reg_base);
+
+	if (!nicvf->reg_base) {
+		printf("Cannot map config register space, aborting\n");
+		ret = -1;
+		goto fail;
+	}
+
+	ret = nicvf_set_qset_resources(nicvf);
+	if (ret)
+		return -1;
+
+	sprintf(name, "vnic%u", nicvf->vf_id);
+	debug("%s name %s\n", __func__, name);
+	device_set_name(dev, name);
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+	debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
+	debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
+	      __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
+	      pdata);
+
+	fdt_board_get_ethaddr(bgx, lmac, ethaddr);
+
+	debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);
+
+	if (is_valid_ethaddr(ethaddr)) {
+		memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
+		eth_env_set_enetaddr_by_index("eth", dev->seq,
ethaddr); + } + debug("%s enetaddr %pM ethaddr %pM\n", __func__, + pdata->enetaddr, ethaddr); + +fail: + return ret; +} + +int octeontx_vnic_probe(struct udevice *dev) +{ + return nicvf_initialize(dev); +} + +static const struct eth_ops octeontx_vnic_ops = { + .start = nicvf_open, + .stop = nicvf_stop, + .send = nicvf_xmit, + .recv = nicvf_recv, + .free_pkt = nicvf_free_pkt, + .write_hwaddr = nicvf_write_hwaddr, +}; + +U_BOOT_DRIVER(octeontx_vnic) = { + .name = "vnic", + .id = UCLASS_ETH, + .probe = octeontx_vnic_probe, + .ops = &octeontx_vnic_ops, + .priv_auto_alloc_size = sizeof(struct nicvf), + .platdata_auto_alloc_size = sizeof(struct eth_pdata), +}; + +static struct pci_device_id octeontx_vnic_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) }, + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) }, + {} +}; + +U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported); diff --git a/drivers/net/octeontx/nicvf_queues.c b/drivers/net/octeontx/nicvf_queues.c new file mode 100644 index 00000000000..c7f262f440d --- /dev/null +++ b/drivers/net/octeontx/nicvf_queues.c @@ -0,0 +1,1140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + */ + +#include <cpu_func.h> +#include <dm/device.h> +#include <malloc.h> +#include <net.h> +#include <phy.h> +#include <linux/delay.h> + +#include "nic_reg.h" +#include "nic.h" +#include "q_struct.h" +#include "nicvf_queues.h" + +static int nicvf_poll_reg(struct nicvf *nic, int qidx, + u64 reg, int bit_pos, int bits, int val) +{ + u64 bit_mask; + u64 reg_val; + int timeout = 10; + + bit_mask = (1ULL << bits) - 1; + bit_mask = (bit_mask << bit_pos); + + while (timeout) { + reg_val = nicvf_queue_reg_read(nic, reg, qidx); + if (((reg_val & bit_mask) >> bit_pos) == val) + return 0; + udelay(2000); + timeout--; + } + printf("Poll on reg 0x%llx failed\n", reg); + return 1; +} + +static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, + int q_len, int desc_size, int align_bytes) +{ + dmem->q_len = q_len; + dmem->size = (desc_size * q_len) + align_bytes; + /* Save address, need it while freeing */ + dmem->unalign_base = calloc(1, dmem->size); + dmem->dma = (uintptr_t)dmem->unalign_base; + + if (!dmem->unalign_base) + return -1; + + /* Align memory address for 'align_bytes' */ + dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); + dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); + + return 0; +} + +static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) +{ + if (!dmem) + return; + + free(dmem->unalign_base); + + dmem->unalign_base = NULL; + dmem->base = NULL; +} + +static void *nicvf_rb_ptr_to_pkt(struct nicvf *nic, uintptr_t rb_ptr) +{ + return (void *)rb_ptr; +} + +static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, + int ring_len, int buf_size) +{ + int idx; + uintptr_t rbuf; + struct rbdr_entry_t *desc; + + if (nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, + sizeof(struct rbdr_entry_t), + NICVF_RCV_BUF_ALIGN_BYTES)) { + printf("Unable to allocate memory for rcv buffer ring\n"); + return -1; + } + + rbdr->desc = rbdr->dmem.base; + /* Buffer size has to be in multiples of 128 bytes */ + rbdr->dma_size = buf_size; + rbdr->enable = true; + rbdr->thresh = RBDR_THRESH; + + debug("%s: %d: allocating %lld bytes for rcv buffers\n", + __func__, __LINE__, + ring_len * buf_size + NICVF_RCV_BUF_ALIGN_BYTES); + rbdr->buf_mem = (uintptr_t)calloc(1, ring_len * buf_size + + NICVF_RCV_BUF_ALIGN_BYTES); + + if (!rbdr->buf_mem) { + 
printf("Unable to allocate memory for rcv buffers\n"); + return -1; + } + + rbdr->buffers = NICVF_ALIGNED_ADDR(rbdr->buf_mem, + NICVF_RCV_BUF_ALIGN_BYTES); + + debug("%s: %d: rbdr->buf_mem: %lx, rbdr->buffers: %lx\n", + __func__, __LINE__, rbdr->buf_mem, rbdr->buffers); + + for (idx = 0; idx < ring_len; idx++) { + rbuf = rbdr->buffers + DMA_BUFFER_LEN * idx; + desc = GET_RBDR_DESC(rbdr, idx); + desc->buf_addr = rbuf >> NICVF_RCV_BUF_ALIGN; + flush_dcache_range((uintptr_t)desc, + (uintptr_t)desc + sizeof(desc)); + } + return 0; +} + +static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) +{ + if (!rbdr) + return; + + rbdr->enable = false; + if (!rbdr->dmem.base) + return; + + debug("%s: %d: rbdr->buf_mem: %p\n", __func__, + __LINE__, (void *)rbdr->buf_mem); + free((void *)rbdr->buf_mem); + + /* Free RBDR ring */ + nicvf_free_q_desc_mem(nic, &rbdr->dmem); +} + +/* Refill receive buffer descriptors with new buffers. + * This runs in softirq context . + */ +void nicvf_refill_rbdr(struct nicvf *nic) +{ + struct queue_set *qs = nic->qs; + int rbdr_idx = qs->rbdr_cnt; + unsigned long qcount, head, tail, rb_cnt; + struct rbdr *rbdr; + + if (!rbdr_idx) + return; + rbdr_idx--; + rbdr = &qs->rbdr[rbdr_idx]; + /* Check if it's enabled */ + if (!rbdr->enable) { + printf("Receive queue %d is disabled\n", rbdr_idx); + return; + } + + /* check if valid descs reached or crossed threshold level */ + qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); + head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, rbdr_idx); + tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx); + + qcount &= 0x7FFFF; + + rb_cnt = qs->rbdr_len - qcount - 1; + + debug("%s: %d: qcount: %lu, head: %lx, tail: %lx, rb_cnt: %lu\n", + __func__, __LINE__, qcount, head, tail, rb_cnt); + + /* Notify HW */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, rb_cnt); + + asm volatile ("dsb sy"); +} + +/* TBD: how to handle full packets received in CQ + * i.e conversion of buffers into SKBs + */ +static int nicvf_init_cmp_queue(struct nicvf *nic, + struct cmp_queue *cq, int q_len) +{ + if (nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, + CMP_QUEUE_DESC_SIZE, + NICVF_CQ_BASE_ALIGN_BYTES)) { + printf("Unable to allocate memory for completion queue\n"); + return -1; + } + cq->desc = cq->dmem.base; + if (!pass1_silicon(nic->rev_id, nic->nicpf->hw->model_id)) + cq->thresh = CMP_QUEUE_CQE_THRESH; + else + cq->thresh = 0; + cq->intr_timer_thresh = CMP_QUEUE_TIMER_THRESH; + + return 0; +} + +static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) +{ + if (!cq) + return; + if (!cq->dmem.base) + return; + + nicvf_free_q_desc_mem(nic, &cq->dmem); +} + +static int nicvf_init_snd_queue(struct nicvf *nic, + struct snd_queue *sq, int q_len) +{ + if (nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, + SND_QUEUE_DESC_SIZE, + NICVF_SQ_BASE_ALIGN_BYTES)) { + printf("Unable to allocate memory for send queue\n"); + return -1; + } + + sq->desc = sq->dmem.base; + sq->skbuff = calloc(q_len, sizeof(u64)); + sq->head = 0; + sq->tail = 0; + sq->free_cnt = q_len - 1; + sq->thresh = SND_QUEUE_THRESH; + + return 0; +} + +static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) +{ + if (!sq) + return; + if (!sq->dmem.base) + return; + + debug("%s: %d\n", __func__, __LINE__); + free(sq->skbuff); + + nicvf_free_q_desc_mem(nic, &sq->dmem); +} + +static void nicvf_reclaim_snd_queue(struct nicvf *nic, + struct queue_set *qs, int qidx) +{ + /* Disable send queue */ + 
 */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+	/* Check if SQ is stopped */
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+		return;
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	union nic_mbx mbx = {};
+
+	/* Make sure all packets in the pipeline are written back into mem */
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	/* Disable completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+			       struct rbdr *rbdr, int qidx)
+{
+	u64 tmp, fifo_state;
+	int timeout = 10;
+
+	/* Save head and tail pointers for freeing up buffers */
+	rbdr->head = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_HEAD,
+					  qidx) >> 3;
+	rbdr->tail = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_TAIL,
+					  qidx) >> 3;
+
+	/* If RBDR FIFO is in 'FAIL' state then do a reset first
+	 * before reclaiming.
+	 */
+	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+	if (((fifo_state >> 62) & 0x03) == 0x3)
+		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+				      qidx, NICVF_RBDR_RESET);
+
+	/* Disable RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
+	while (1) {
+		tmp = nicvf_queue_reg_read(nic,
+					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+					   qidx);
+		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+			break;
+		mdelay(2000);
+		timeout--;
+		if (!timeout) {
+			printf("Failed polling on prefetch status\n");
+			return;
+		}
+	}
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+			      qidx, NICVF_RBDR_RESET);
+
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+		return;
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+				   int qidx, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct rcv_queue *rq;
+	union {
+		struct rq_cfg s;
+		u64 u;
+	} rq_cfg;
+
+	rq = &qs->rq[qidx];
+	rq->enable = enable;
+
+	/* Disable receive queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+	if (!rq->enable) {
+		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		return;
+	}
+
+	rq->cq_qs = qs->vnic_id;
+	rq->cq_idx = qidx;
+	rq->start_rbdr_qs = qs->vnic_id;
+	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	rq->cont_rbdr_qs = qs->vnic_id;
+	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	/* All writes of RBDR data to be loaded into L2 cache as well */
+	rq->caching = 1;
+
+	/* Send a mailbox msg to PF to config RQ */
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+	mbx.rq.qs_num = qs->vnic_id;
+	mbx.rq.rq_num = qidx;
+	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
+		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62)
| (qs->vnic_id << 0); + nicvf_send_msg_to_pf(nic, &mbx); + + /* RQ drop config + * Enable CQ drop to reserve sufficient CQEs for all tx packets + */ + mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; + mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); + nicvf_send_msg_to_pf(nic, &mbx); + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); + + /* Enable Receive queue */ + rq_cfg.s.ena = 1; + rq_cfg.s.tcp_ena = 0; + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.u); +} + +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct cmp_queue *cq; + union { + u64 u; + struct cq_cfg s; + } cq_cfg; + + cq = &qs->cq[qidx]; + cq->enable = enable; + + if (!cq->enable) { + nicvf_reclaim_cmp_queue(nic, qs, qidx); + return; + } + + /* Reset completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); + + if (!cq->enable) + return; + + /* Set completion queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, + qidx, (u64)(cq->dmem.phys_base)); + + /* Enable Completion queue */ + cq_cfg.s.ena = 1; + cq_cfg.s.reset = 0; + cq_cfg.s.caching = 0; + cq_cfg.s.qsize = CMP_QSIZE; + cq_cfg.s.avg_con = 0; + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.u); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, + cq->intr_timer_thresh); +} + +/* Configures transmit queue */ +static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + union nic_mbx mbx = {}; + struct snd_queue *sq; + + union { + struct sq_cfg s; + u64 u; + } sq_cfg; + + sq = &qs->sq[qidx]; + sq->enable = enable; + + if (!sq->enable) { + nicvf_reclaim_snd_queue(nic, qs, qidx); + return; + } + + /* Reset send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); + + sq->cq_qs = qs->vnic_id; + sq->cq_idx = qidx; + + /* Send a mailbox msg to PF to config SQ */ + mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; + mbx.sq.qs_num = qs->vnic_id; + mbx.sq.sq_num = qidx; + mbx.sq.sqs_mode = nic->sqs_mode; + mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; + nicvf_send_msg_to_pf(nic, &mbx); + + /* Set queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, + qidx, (u64)(sq->dmem.phys_base)); + + /* Enable send queue & set queue size */ + sq_cfg.s.ena = 1; + sq_cfg.s.reset = 0; + sq_cfg.s.ldwb = 0; + sq_cfg.s.qsize = SND_QSIZE; + sq_cfg.s.tstmp_bgx_intf = 0; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.u); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); +} + +/* Configures receive buffer descriptor ring */ +static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct rbdr *rbdr; + union { + struct rbdr_cfg s; + u64 u; + } rbdr_cfg; + + rbdr = &qs->rbdr[qidx]; + nicvf_reclaim_rbdr(nic, rbdr, qidx); + if (!enable) + return; + + /* Set descriptor base address */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, + qidx, (u64)(rbdr->dmem.phys_base)); + + /* Enable RBDR & set queue size */ + /* Buffer size should be in multiples of 128 bytes */ + rbdr_cfg.s.ena = 1; + rbdr_cfg.s.reset = 0; + rbdr_cfg.s.ldwb = 0; + rbdr_cfg.s.qsize = RBDR_SIZE; + rbdr_cfg.s.avg_con = 0; + rbdr_cfg.s.lines = rbdr->dma_size / 128; + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, rbdr_cfg.u); + + /* Notify HW */ + 
nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, + qidx, qs->rbdr_len - 1); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, + qidx, rbdr->thresh - 1); +} + +/* Requests PF to assign and enable Qset */ +void nicvf_qset_config(struct nicvf *nic, bool enable) +{ + union nic_mbx mbx = {}; + struct queue_set *qs = nic->qs; + struct qs_cfg *qs_cfg; + + if (!qs) { + printf("Qset is still not allocated, don't init queues\n"); + return; + } + + qs->enable = enable; + qs->vnic_id = nic->vf_id; + + /* Send a mailbox msg to PF to config Qset */ + mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; + mbx.qs.num = qs->vnic_id; +#ifdef VNIC_MULTI_QSET_SUPPORT + mbx.qs.sqs_count = nic->sqs_count; +#endif + + mbx.qs.cfg = 0; + qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; + if (qs->enable) { + qs_cfg->ena = 1; +#ifdef __BIG_ENDIAN + qs_cfg->be = 1; +#endif + qs_cfg->vnic = qs->vnic_id; + } + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_free_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Free receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_free_rbdr(nic, &qs->rbdr[qidx]); + + /* Free completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_free_cmp_queue(nic, &qs->cq[qidx]); + + /* Free send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_free_snd_queue(nic, &qs->sq[qidx]); +} + +static int nicvf_alloc_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Alloc receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, + DMA_BUFFER_LEN)) + goto alloc_fail; + } + + /* Alloc send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) { + if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) + goto alloc_fail; + } + + /* Alloc completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) + goto alloc_fail; + } + + return 0; +alloc_fail: + nicvf_free_resources(nic); + return -1; +} + +int nicvf_set_qset_resources(struct nicvf *nic) +{ + struct queue_set *qs; + + qs = calloc(1, sizeof(struct queue_set)); + if (!qs) + return -1; + nic->qs = qs; + + /* Set count of each queue */ + qs->rbdr_cnt = RBDR_CNT; + qs->rq_cnt = 1; + qs->sq_cnt = SND_QUEUE_CNT; + qs->cq_cnt = CMP_QUEUE_CNT; + + /* Set queue lengths */ + qs->rbdr_len = RCV_BUF_COUNT; + qs->sq_len = SND_QUEUE_LEN; + qs->cq_len = CMP_QUEUE_LEN; + + nic->rx_queues = qs->rq_cnt; + nic->tx_queues = qs->sq_cnt; + + return 0; +} + +int nicvf_config_data_transfer(struct nicvf *nic, bool enable) +{ + bool disable = false; + struct queue_set *qs = nic->qs; + int qidx; + + if (!qs) + return 0; + + if (enable) { + if (nicvf_alloc_resources(nic)) + return -1; + + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_cmp_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, enable); + } else { + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < 
qs->cq_cnt; qidx++)
+			nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+		nicvf_free_resources(nic);
+	}
+
+	return 0;
+}
+
+/* Get a free desc from SQ
+ * returns descriptor pointer & descriptor number
+ */
+static int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	int qentry;
+
+	qentry = sq->tail;
+	sq->free_cnt -= desc_cnt;
+	sq->tail += desc_cnt;
+	sq->tail &= (sq->dmem.q_len - 1);
+
+	return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	sq->free_cnt += desc_cnt;
+	sq->head += desc_cnt;
+	sq->head &= (sq->dmem.q_len - 1);
+}
+
+static int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+	qentry++;
+	qentry &= (sq->dmem.q_len - 1);
+	return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg |= NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+	/* Ring doorbell so that H/W restarts processing SQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg &= ~NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct udevice *dev, struct snd_queue *sq,
+			      int qidx)
+{
+	u64 head;
+	struct nicvf *nic = dev_get_priv(dev);
+	struct sq_hdr_subdesc *hdr;
+
+	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+
+	while (sq->head != head) {
+		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+			nicvf_put_sq_desc(sq, 1);
+			continue;
+		}
+		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+	}
+}
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic)
+{
+	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+	return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
+			 int subdesc_cnt, void *pkt, size_t pkt_len)
+{
+	struct sq_hdr_subdesc *hdr;
+
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+	sq->skbuff[qentry] = (uintptr_t)pkt;
+
+	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+	/* Enable notification via CQE after processing SQE */
+	hdr->post_cqe = 1;
+	/* No of subdescriptors following this */
+	hdr->subdesc_cnt = subdesc_cnt;
+	hdr->tot_len = pkt_len;
+
+	flush_dcache_range((uintptr_t)hdr,
+			   (uintptr_t)hdr + sizeof(struct sq_hdr_subdesc));
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+					       size_t size, uintptr_t data)
+{
+	struct sq_gather_subdesc *gather;
+
+	qentry &= (sq->dmem.q_len - 1);
+	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+	memset(gather, 0, SND_QUEUE_DESC_SIZE);
+	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
+	gather->size = size;
+	gather->addr = data;
+
+	flush_dcache_range((uintptr_t)gather,
+			   (uintptr_t)gather + sizeof(struct sq_hdr_subdesc));
+}
+
+/* Append an skb to a SQ for packet transfer.
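+ */

Each transmitted frame consumes MIN_SQ_DESC_PER_PKT_XMIT (two) ring slots: a HEADER subdescriptor carrying the total length and the post_cqe flag, immediately followed by one GATHER subdescriptor pointing at the frame data. A sketch of the ring after one packet is queued at entry q (editor's illustration, not part of the patch):

	/* sq->desc[q]     HEADER: subdesc_cnt = 1, tot_len = pkt_size */
	/* sq->desc[q + 1] GATHER: size = pkt_size, addr = pkt         */

	/* the doorbell write then advertises both slots at once */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, sq_num, 2);

+/* Returns 1 on success, 0 when the ring has no room for the packet.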
 */
+int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_size)
+{
+	int subdesc_cnt;
+	int sq_num = 0, qentry;
+	struct queue_set *qs;
+	struct snd_queue *sq;
+
+	qs = nic->qs;
+	sq = &qs->sq[sq_num];
+
+	subdesc_cnt = nicvf_sq_subdesc_required(nic);
+	if (subdesc_cnt > sq->free_cnt)
+		goto append_fail;
+
+	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+	/* Add SQ header subdesc */
+	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+				 pkt, pkt_size);
+
+	/* Add SQ gather subdescs */
+	qentry = nicvf_get_nxt_sqentry(sq, qentry);
+	nicvf_sq_add_gather_subdesc(sq, qentry, pkt_size, (uintptr_t)(pkt));
+
+	flush_dcache_range((uintptr_t)pkt,
+			   (uintptr_t)pkt + pkt_size);
+
+	/* make sure all memory stores are done before ringing doorbell */
+	asm volatile ("dsb sy");
+
+	/* Inform HW to xmit new packet */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+			      sq_num, subdesc_cnt);
+	return 1;
+
+append_fail:
+	printf("Not enough SQ descriptors to xmit pkt\n");
+	return 0;
+}
+
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+	return (i & ~3) + 3 - (i & 3);
+#else
+	return i;
+#endif
+}
+
+void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len)
+{
+	int frag;
+	int payload_len = 0, tot_len;
+	void *pkt = NULL, *pkt_buf = NULL, *buffer;
+	struct cqe_rx_t *cqe_rx;
+	struct rbdr *rbdr;
+	struct rcv_queue *rq;
+	struct queue_set *qs = nic->qs;
+	u16 *rb_lens = NULL;
+	u64 *rb_ptrs = NULL;
+
+	cqe_rx = (struct cqe_rx_t *)cq_desc;
+
+	rq = &qs->rq[cqe_rx->rq_idx];
+	rbdr = &qs->rbdr[rq->start_qs_rbdr_idx];
+	rb_lens = cq_desc + (3 * sizeof(u64)); /* Use offsetof */
+	/* Except on 88xx pass1, all other chips add CQE_RX2_S to
+	 * CQE_RX at word 6, hence the buffer pointers move up by one word.
+	 *
+	 * Use the existing 'hw_tso' flag, which will be set for all chips
+	 * except 88xx pass1, instead of an additional cache line
+	 * access (or miss) by using the PCI dev's revision.
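+	 */

Note the cache discipline in this function: the receive buffers are filled by DMA behind the CPU's back, so each fragment must be invalidated from the data cache before it is read, just as the transmit path flushes descriptors and payloads before ringing the doorbell. The pattern in isolation (editor's sketch; U-Boot has no coherent-DMA allocator, so drivers do this by hand):

	/* discard stale cache lines, then it is safe to read what HW wrote */
	invalidate_dcache_range((uintptr_t)buf, (uintptr_t)buf + len);
	memcpy(dst, buf, len);

+	/* 88xx pass1 lacks CQE_RX2_S, so its pointers sit one word lower: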
+ */ + if (!nic->hw_tso) + rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + else + rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); + + /* + * Figure out packet length to create packet buffer + */ + for (frag = 0; frag < cqe_rx->rb_cnt; frag++) + payload_len += rb_lens[frag_num(frag)]; + *pkt_len = payload_len; + /* round up size to 8 byte multiple */ + tot_len = (payload_len & (~0x7)) + 8; + buffer = calloc(1, tot_len); + if (!buffer) { + printf("%s - Failed to allocate packet buffer\n", __func__); + return NULL; + } + pkt_buf = buffer; + debug("total pkt buf %p len %ld tot_len %d\n", pkt_buf, *pkt_len, + tot_len); + for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { + payload_len = rb_lens[frag_num(frag)]; + + invalidate_dcache_range((uintptr_t)(*rb_ptrs), + (uintptr_t)(*rb_ptrs) + rbdr->dma_size); + + /* First fragment */ + *rb_ptrs = *rb_ptrs - cqe_rx->align_pad; + + pkt = nicvf_rb_ptr_to_pkt(nic, *rb_ptrs); + + invalidate_dcache_range((uintptr_t)pkt, + (uintptr_t)pkt + payload_len); + + if (cqe_rx->align_pad) + pkt += cqe_rx->align_pad; + debug("pkt_buf %p, pkt %p payload_len %d\n", pkt_buf, pkt, + payload_len); + memcpy(buffer, pkt, payload_len); + buffer += payload_len; + /* Next buffer pointer */ + rb_ptrs++; + } + return pkt_buf; +} + +/* Clear interrupt */ +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val = 0; + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case NICVF_INTR_TCP_TIMER: + reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + printf("Failed to clear interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_INT, reg_val); +} + +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) +{ + struct rcv_queue *rq; + +#define GET_RQ_STATS(reg) \ + nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ + (rq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3)) + + rq = &nic->qs->rq[rq_idx]; + rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); + rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); +} + +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) +{ + struct snd_queue *sq; + +#define GET_SQ_STATS(reg) \ + nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ + (sq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3)) + + sq = &nic->qs->sq[sq_idx]; + sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); + sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); +} + +/* Check for errors in the receive cmp.queue entry */ +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, void *cq_desc) +{ + struct cqe_rx_t *cqe_rx; + struct cmp_queue_stats *stats = &cq->stats; + + cqe_rx = (struct cqe_rx_t *)cq_desc; + if (!cqe_rx->err_level && !cqe_rx->err_opcode) { + stats->rx.errop.good++; + return 0; + } + + switch (cqe_rx->err_level) { + case CQ_ERRLVL_MAC: + stats->rx.errlvl.mac_errs++; + break; + case CQ_ERRLVL_L2: + stats->rx.errlvl.l2_errs++; + break; + case CQ_ERRLVL_L3: + stats->rx.errlvl.l3_errs++; + break; + case CQ_ERRLVL_L4: + stats->rx.errlvl.l4_errs++; + break; + } + + switch (cqe_rx->err_opcode) { + case CQ_RX_ERROP_RE_PARTIAL: + stats->rx.errop.partial_pkts++; + 
break; + case CQ_RX_ERROP_RE_JABBER: + stats->rx.errop.jabber_errs++; + break; + case CQ_RX_ERROP_RE_FCS: + stats->rx.errop.fcs_errs++; + break; + case CQ_RX_ERROP_RE_TERMINATE: + stats->rx.errop.terminate_errs++; + break; + case CQ_RX_ERROP_RE_RX_CTL: + stats->rx.errop.bgx_rx_errs++; + break; + case CQ_RX_ERROP_PREL2_ERR: + stats->rx.errop.prel2_errs++; + break; + case CQ_RX_ERROP_L2_FRAGMENT: + stats->rx.errop.l2_frags++; + break; + case CQ_RX_ERROP_L2_OVERRUN: + stats->rx.errop.l2_overruns++; + break; + case CQ_RX_ERROP_L2_PFCS: + stats->rx.errop.l2_pfcs++; + break; + case CQ_RX_ERROP_L2_PUNY: + stats->rx.errop.l2_puny++; + break; + case CQ_RX_ERROP_L2_MAL: + stats->rx.errop.l2_hdr_malformed++; + break; + case CQ_RX_ERROP_L2_OVERSIZE: + stats->rx.errop.l2_oversize++; + break; + case CQ_RX_ERROP_L2_UNDERSIZE: + stats->rx.errop.l2_undersize++; + break; + case CQ_RX_ERROP_L2_LENMISM: + stats->rx.errop.l2_len_mismatch++; + break; + case CQ_RX_ERROP_L2_PCLP: + stats->rx.errop.l2_pclp++; + break; + case CQ_RX_ERROP_IP_NOT: + stats->rx.errop.non_ip++; + break; + case CQ_RX_ERROP_IP_CSUM_ERR: + stats->rx.errop.ip_csum_err++; + break; + case CQ_RX_ERROP_IP_MAL: + stats->rx.errop.ip_hdr_malformed++; + break; + case CQ_RX_ERROP_IP_MALD: + stats->rx.errop.ip_payload_malformed++; + break; + case CQ_RX_ERROP_IP_HOP: + stats->rx.errop.ip_hop_errs++; + break; + case CQ_RX_ERROP_L3_ICRC: + stats->rx.errop.l3_icrc_errs++; + break; + case CQ_RX_ERROP_L3_PCLP: + stats->rx.errop.l3_pclp++; + break; + case CQ_RX_ERROP_L4_MAL: + stats->rx.errop.l4_malformed++; + break; + case CQ_RX_ERROP_L4_CHK: + stats->rx.errop.l4_csum_errs++; + break; + case CQ_RX_ERROP_UDP_LEN: + stats->rx.errop.udp_len_err++; + break; + case CQ_RX_ERROP_L4_PORT: + stats->rx.errop.bad_l4_port++; + break; + case CQ_RX_ERROP_TCP_FLAG: + stats->rx.errop.bad_tcp_flag++; + break; + case CQ_RX_ERROP_TCP_OFFSET: + stats->rx.errop.tcp_offset_errs++; + break; + case CQ_RX_ERROP_L4_PCLP: + stats->rx.errop.l4_pclp++; + break; + case CQ_RX_ERROP_RBDR_TRUNC: + stats->rx.errop.pkt_truncated++; + break; + } + + return 1; +} + +/* Check for errors in the send cmp.queue entry */ +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, void *cq_desc) +{ + struct cqe_send_t *cqe_tx; + struct cmp_queue_stats *stats = &cq->stats; + + cqe_tx = (struct cqe_send_t *)cq_desc; + switch (cqe_tx->send_status) { + case CQ_TX_ERROP_GOOD: + stats->tx.good++; + return 0; + break; + case CQ_TX_ERROP_DESC_FAULT: + stats->tx.desc_fault++; + break; + case CQ_TX_ERROP_HDR_CONS_ERR: + stats->tx.hdr_cons_err++; + break; + case CQ_TX_ERROP_SUBDC_ERR: + stats->tx.subdesc_err++; + break; + case CQ_TX_ERROP_IMM_SIZE_OFLOW: + stats->tx.imm_size_oflow++; + break; + case CQ_TX_ERROP_DATA_SEQUENCE_ERR: + stats->tx.data_seq_err++; + break; + case CQ_TX_ERROP_MEM_SEQUENCE_ERR: + stats->tx.mem_seq_err++; + break; + case CQ_TX_ERROP_LOCK_VIOL: + stats->tx.lock_viol++; + break; + case CQ_TX_ERROP_DATA_FAULT: + stats->tx.data_fault++; + break; + case CQ_TX_ERROP_TSTMP_CONFLICT: + stats->tx.tstmp_conflict++; + break; + case CQ_TX_ERROP_TSTMP_TIMEOUT: + stats->tx.tstmp_timeout++; + break; + case CQ_TX_ERROP_MEM_FAULT: + stats->tx.mem_fault++; + break; + case CQ_TX_ERROP_CK_OVERLAP: + stats->tx.csum_overlap++; + break; + case CQ_TX_ERROP_CK_OFLOW: + stats->tx.csum_overflow++; + break; + } + + return 1; +} diff --git a/drivers/net/octeontx/nicvf_queues.h b/drivers/net/octeontx/nicvf_queues.h new file mode 100644 index 00000000000..833b2a1dd47 --- /dev/null +++ 
b/drivers/net/octeontx/nicvf_queues.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef NICVF_QUEUES_H +#define NICVF_QUEUES_H + +#include "q_struct.h" + +#define MAX_QUEUE_SET 128 +#define MAX_RCV_QUEUES_PER_QS 8 +#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 +#define MAX_SND_QUEUES_PER_QS 8 +#define MAX_CMP_QUEUES_PER_QS 8 + +/* VF's queue interrupt ranges */ +#define NICVF_INTR_ID_CQ 0 +#define NICVF_INTR_ID_SQ 8 +#define NICVF_INTR_ID_RBDR 16 +#define NICVF_INTR_ID_MISC 18 +#define NICVF_INTR_ID_QS_ERR 19 + +#define RBDR_SIZE0 0ULL /* 8K entries */ +#define RBDR_SIZE1 1ULL /* 16K entries */ +#define RBDR_SIZE2 2ULL /* 32K entries */ +#define RBDR_SIZE3 3ULL /* 64K entries */ +#define RBDR_SIZE4 4ULL /* 126K entries */ +#define RBDR_SIZE5 5ULL /* 256K entries */ +#define RBDR_SIZE6 6ULL /* 512K entries */ + +#define SND_QUEUE_SIZE0 0ULL /* 1K entries */ +#define SND_QUEUE_SIZE1 1ULL /* 2K entries */ +#define SND_QUEUE_SIZE2 2ULL /* 4K entries */ +#define SND_QUEUE_SIZE3 3ULL /* 8K entries */ +#define SND_QUEUE_SIZE4 4ULL /* 16K entries */ +#define SND_QUEUE_SIZE5 5ULL /* 32K entries */ +#define SND_QUEUE_SIZE6 6ULL /* 64K entries */ + +#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */ +#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */ +#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */ +#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */ +#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */ +#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */ +#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ + +/* Default queue count per QS, its lengths and threshold values */ +#define RBDR_CNT 1 +#define RCV_QUEUE_CNT 1 +#define SND_QUEUE_CNT 1 +#define CMP_QUEUE_CNT 1 /* Max of RCV and SND qcount */ + +#define SND_QSIZE SND_QUEUE_SIZE0 +#define SND_QUEUE_LEN BIT_ULL((SND_QSIZE + 10)) +#define SND_QUEUE_THRESH 2ULL +#define MIN_SQ_DESC_PER_PKT_XMIT 2 +#define MAX_CQE_PER_PKT_XMIT 2 + +#define CMP_QSIZE CMP_QUEUE_SIZE0 +#define CMP_QUEUE_LEN BIT_ULL((CMP_QSIZE + 10)) +#define CMP_QUEUE_CQE_THRESH 0 +#define CMP_QUEUE_TIMER_THRESH 1 /* 1 ms */ + +#define RBDR_SIZE RBDR_SIZE0 +#define RCV_BUF_COUNT BIT_ULL((RBDR_SIZE + 13)) +#define RBDR_THRESH (RCV_BUF_COUNT / 2) +#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ +#define RCV_FRAG_LEN DMA_BUFFER_LEN + +#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) *\ + MAX_CQE_PER_PKT_XMIT) +#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) + +/* Descriptor size */ +#define SND_QUEUE_DESC_SIZE 16 /* 128 bits */ +#define CMP_QUEUE_DESC_SIZE 512 + +/* Buffer / descriptor alignments */ +#define NICVF_RCV_BUF_ALIGN 7 +#define NICVF_RCV_BUF_ALIGN_BYTES BIT_ULL(NICVF_RCV_BUF_ALIGN) +#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */ +#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ + +#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) + +/* Queue enable/disable */ +#define NICVF_SQ_EN BIT_ULL(19) + +/* Queue reset */ +#define NICVF_CQ_RESET BIT_ULL(41) +#define NICVF_SQ_RESET BIT_ULL(17) +#define NICVF_RBDR_RESET BIT_ULL(43) + +enum CQ_RX_ERRLVL_E { + CQ_ERRLVL_MAC, + CQ_ERRLVL_L2, + CQ_ERRLVL_L3, + CQ_ERRLVL_L4, +}; + +enum CQ_RX_ERROP_E { + CQ_RX_ERROP_RE_NONE = 0x0, + CQ_RX_ERROP_RE_PARTIAL = 0x1, + CQ_RX_ERROP_RE_JABBER = 0x2, + CQ_RX_ERROP_RE_FCS = 0x7, + CQ_RX_ERROP_RE_TERMINATE = 0x9, + CQ_RX_ERROP_RE_RX_CTL = 0xb, + CQ_RX_ERROP_PREL2_ERR = 0x1f, + CQ_RX_ERROP_L2_FRAGMENT = 0x20, + CQ_RX_ERROP_L2_OVERRUN = 0x21, + CQ_RX_ERROP_L2_PFCS = 0x22, + CQ_RX_ERROP_L2_PUNY = 0x23, 
+ CQ_RX_ERROP_L2_MAL = 0x24, + CQ_RX_ERROP_L2_OVERSIZE = 0x25, + CQ_RX_ERROP_L2_UNDERSIZE = 0x26, + CQ_RX_ERROP_L2_LENMISM = 0x27, + CQ_RX_ERROP_L2_PCLP = 0x28, + CQ_RX_ERROP_IP_NOT = 0x41, + CQ_RX_ERROP_IP_CSUM_ERR = 0x42, + CQ_RX_ERROP_IP_MAL = 0x43, + CQ_RX_ERROP_IP_MALD = 0x44, + CQ_RX_ERROP_IP_HOP = 0x45, + CQ_RX_ERROP_L3_ICRC = 0x46, + CQ_RX_ERROP_L3_PCLP = 0x47, + CQ_RX_ERROP_L4_MAL = 0x61, + CQ_RX_ERROP_L4_CHK = 0x62, + CQ_RX_ERROP_UDP_LEN = 0x63, + CQ_RX_ERROP_L4_PORT = 0x64, + CQ_RX_ERROP_TCP_FLAG = 0x65, + CQ_RX_ERROP_TCP_OFFSET = 0x66, + CQ_RX_ERROP_L4_PCLP = 0x67, + CQ_RX_ERROP_RBDR_TRUNC = 0x70, +}; + +enum CQ_TX_ERROP_E { + CQ_TX_ERROP_GOOD = 0x0, + CQ_TX_ERROP_DESC_FAULT = 0x10, + CQ_TX_ERROP_HDR_CONS_ERR = 0x11, + CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, + CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, + CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, + CQ_TX_ERROP_LOCK_VIOL = 0x83, + CQ_TX_ERROP_DATA_FAULT = 0x84, + CQ_TX_ERROP_TSTMP_CONFLICT = 0x85, + CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86, + CQ_TX_ERROP_MEM_FAULT = 0x87, + CQ_TX_ERROP_CK_OVERLAP = 0x88, + CQ_TX_ERROP_CK_OFLOW = 0x89, + CQ_TX_ERROP_ENUM_LAST = 0x8a, +}; + +struct cmp_queue_stats { + struct rx_stats { + struct { + u64 mac_errs; + u64 l2_errs; + u64 l3_errs; + u64 l4_errs; + } errlvl; + struct { + u64 good; + u64 partial_pkts; + u64 jabber_errs; + u64 fcs_errs; + u64 terminate_errs; + u64 bgx_rx_errs; + u64 prel2_errs; + u64 l2_frags; + u64 l2_overruns; + u64 l2_pfcs; + u64 l2_puny; + u64 l2_hdr_malformed; + u64 l2_oversize; + u64 l2_undersize; + u64 l2_len_mismatch; + u64 l2_pclp; + u64 non_ip; + u64 ip_csum_err; + u64 ip_hdr_malformed; + u64 ip_payload_malformed; + u64 ip_hop_errs; + u64 l3_icrc_errs; + u64 l3_pclp; + u64 l4_malformed; + u64 l4_csum_errs; + u64 udp_len_err; + u64 bad_l4_port; + u64 bad_tcp_flag; + u64 tcp_offset_errs; + u64 l4_pclp; + u64 pkt_truncated; + } errop; + } rx; + struct tx_stats { + u64 good; + u64 desc_fault; + u64 hdr_cons_err; + u64 subdesc_err; + u64 imm_size_oflow; + u64 data_seq_err; + u64 mem_seq_err; + u64 lock_viol; + u64 data_fault; + u64 tstmp_conflict; + u64 tstmp_timeout; + u64 mem_fault; + u64 csum_overlap; + u64 csum_overflow; + } tx; +}; + +enum RQ_SQ_STATS { + RQ_SQ_STATS_OCTS, + RQ_SQ_STATS_PKTS, +}; + +struct rx_tx_queue_stats { + u64 bytes; + u64 pkts; +}; + +struct q_desc_mem { + uintptr_t dma; + u64 size; + u16 q_len; + uintptr_t phys_base; + void *base; + void *unalign_base; + bool allocated; +}; + +struct rbdr { + bool enable; + u32 dma_size; + u32 thresh; /* Threshold level for interrupt */ + void *desc; + u32 head; + u32 tail; + struct q_desc_mem dmem; + uintptr_t buf_mem; + uintptr_t buffers; +}; + +struct rcv_queue { + bool enable; + struct rbdr *rbdr_start; + struct rbdr *rbdr_cont; + bool en_tcp_reassembly; + u8 cq_qs; /* CQ's QS to which this RQ is assigned */ + u8 cq_idx; /* CQ index (0 to 7) in the QS */ + u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */ + u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */ + u8 start_rbdr_qs; /* First buffer ptrs - QS num */ + u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ + u8 caching; + struct rx_tx_queue_stats stats; +}; + +struct cmp_queue { + bool enable; + u16 intr_timer_thresh; + u16 thresh; + void *desc; + struct q_desc_mem dmem; + struct cmp_queue_stats stats; +}; + +struct snd_queue { + bool enable; + u8 cq_qs; /* CQ's QS to which this SQ is pointing */ + u8 cq_idx; /* CQ index (0 to 7) in the above QS */ + u16 thresh; + u32 free_cnt; + u32 head; + u32 tail; + u64 *skbuff; + void *desc; + 
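+ /*
+ * skbuff holds the buffer address queued with each descriptor so that
+ * nicvf_sq_free_used_descs() can release it after transmission; desc
+ * points at the aligned view of the dmem allocation below.
+ */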
struct q_desc_mem dmem; + struct rx_tx_queue_stats stats; +}; + +struct queue_set { + bool enable; + bool be_en; + u8 vnic_id; + u8 rq_cnt; + u8 cq_cnt; + u64 cq_len; + u8 sq_cnt; + u64 sq_len; + u8 rbdr_cnt; + u64 rbdr_len; + struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS]; + struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS]; + struct snd_queue sq[MAX_SND_QUEUES_PER_QS]; + struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS]; +}; + +#define GET_RBDR_DESC(RING, idx)\ + (&(((struct rbdr_entry_t *)((RING)->desc))[idx])) +#define GET_SQ_DESC(RING, idx)\ + (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx])) +#define GET_CQ_DESC(RING, idx)\ + (&(((union cq_desc_t *)((RING)->desc))[idx])) + +/* CQ status bits */ +#define CQ_WR_FULL BIT(26) +#define CQ_WR_DISABLE BIT(25) +#define CQ_WR_FAULT BIT(24) +#define CQ_CQE_COUNT (0xFFFF << 0) + +#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) + +int nicvf_set_qset_resources(struct nicvf *nic); +int nicvf_config_data_transfer(struct nicvf *nic, bool enable); +void nicvf_qset_config(struct nicvf *nic, bool enable); +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable); + +void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx); +void nicvf_sq_disable(struct nicvf *nic, int qidx); +void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt); +void nicvf_sq_free_used_descs(struct udevice *dev, + struct snd_queue *sq, int qidx); +int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len); + +void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len); +void nicvf_refill_rbdr(struct nicvf *nic); + +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx); +int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx); + +/* Register access APIs */ +void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_reg_read(struct nicvf *nic, u64 offset); +void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset); +void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, + u64 qidx, u64 val); +u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx); + +/* Stats */ +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, void *cq_desc); +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, void *cq_desc); +#endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/octeontx/q_struct.h b/drivers/net/octeontx/q_struct.h new file mode 100644 index 00000000000..87abb132f50 --- /dev/null +++ b/drivers/net/octeontx/q_struct.h @@ -0,0 +1,695 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef Q_STRUCT_H +#define Q_STRUCT_H + +/* Load transaction types for reading segment bytes specified by + * NIC_SEND_GATHER_S[LD_TYPE]. 
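+ * Broadly: LDD allocates the cache line when loading, LDT is a transient
+ * load that avoids cache allocation, and LDWB allows the line to be
+ * invalidated/written back once the data has been consumed.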
+ */ +enum nic_send_ld_type_e { + NIC_SEND_LD_TYPE_E_LDD = 0x0, + NIC_SEND_LD_TYPE_E_LDT = 0x1, + NIC_SEND_LD_TYPE_E_LDWB = 0x2, + NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3, +}; + +enum ether_type_algorithm { + ETYPE_ALG_NONE = 0x0, + ETYPE_ALG_SKIP = 0x1, + ETYPE_ALG_ENDPARSE = 0x2, + ETYPE_ALG_VLAN = 0x3, + ETYPE_ALG_VLAN_STRIP = 0x4, +}; + +enum layer3_type { + L3TYPE_NONE = 0x00, + L3TYPE_GRH = 0x01, + L3TYPE_IPV4 = 0x04, + L3TYPE_IPV4_OPTIONS = 0x05, + L3TYPE_IPV6 = 0x06, + L3TYPE_IPV6_OPTIONS = 0x07, + L3TYPE_ET_STOP = 0x0D, + L3TYPE_OTHER = 0x0E, +}; + +enum layer4_type { + L4TYPE_NONE = 0x00, + L4TYPE_IPSEC_ESP = 0x01, + L4TYPE_IPFRAG = 0x02, + L4TYPE_IPCOMP = 0x03, + L4TYPE_TCP = 0x04, + L4TYPE_UDP = 0x05, + L4TYPE_SCTP = 0x06, + L4TYPE_GRE = 0x07, + L4TYPE_ROCE_BTH = 0x08, + L4TYPE_OTHER = 0x0E, +}; + +/* CPI and RSSI configuration */ +enum cpi_algorithm_type { + CPI_ALG_NONE = 0x0, + CPI_ALG_VLAN = 0x1, + CPI_ALG_VLAN16 = 0x2, + CPI_ALG_DIFF = 0x3, +}; + +enum rss_algorithm_type { + RSS_ALG_NONE = 0x00, + RSS_ALG_PORT = 0x01, + RSS_ALG_IP = 0x02, + RSS_ALG_TCP_IP = 0x03, + RSS_ALG_UDP_IP = 0x04, + RSS_ALG_SCTP_IP = 0x05, + RSS_ALG_GRE_IP = 0x06, + RSS_ALG_ROCE = 0x07, +}; + +enum rss_hash_cfg { + RSS_HASH_L2ETC = 0x00, + RSS_HASH_IP = 0x01, + RSS_HASH_TCP = 0x02, + RSS_TCP_SYN_DIS = 0x03, + RSS_HASH_UDP = 0x04, + RSS_HASH_L4ETC = 0x05, + RSS_HASH_ROCE = 0x06, + RSS_L3_BIDI = 0x07, + RSS_L4_BIDI = 0x08, +}; + +/* Completion queue entry types */ +enum cqe_type { + CQE_TYPE_INVALID = 0x0, + CQE_TYPE_RX = 0x2, + CQE_TYPE_RX_SPLIT = 0x3, + CQE_TYPE_RX_TCP = 0x4, + CQE_TYPE_SEND = 0x8, + CQE_TYPE_SEND_PTP = 0x9, +}; + +enum cqe_rx_tcp_status { + CQE_RX_STATUS_VALID_TCP_CNXT = 0x00, + CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F, +}; + +enum cqe_send_status { + CQE_SEND_STATUS_GOOD = 0x00, + CQE_SEND_STATUS_DESC_FAULT = 0x01, + CQE_SEND_STATUS_HDR_CONS_ERR = 0x11, + CQE_SEND_STATUS_SUBDESC_ERR = 0x12, + CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80, + CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81, + CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82, + CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83, + CQE_SEND_STATUS_LOCK_VIOL = 0x84, + CQE_SEND_STATUS_LOCK_UFLOW = 0x85, + CQE_SEND_STATUS_DATA_FAULT = 0x86, + CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87, + CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88, + CQE_SEND_STATUS_MEM_FAULT = 0x89, + CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A, + CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B, +}; + +enum cqe_rx_tcp_end_reason { + CQE_RX_TCP_END_FIN_FLAG_DET = 0, + CQE_RX_TCP_END_INVALID_FLAG = 1, + CQE_RX_TCP_END_TIMEOUT = 2, + CQE_RX_TCP_END_OUT_OF_SEQ = 3, + CQE_RX_TCP_END_PKT_ERR = 4, + CQE_RX_TCP_END_QS_DISABLED = 0x0F, +}; + +/* Packet protocol level error enumeration */ +enum cqe_rx_err_level { + CQE_RX_ERRLVL_RE = 0x0, + CQE_RX_ERRLVL_L2 = 0x1, + CQE_RX_ERRLVL_L3 = 0x2, + CQE_RX_ERRLVL_L4 = 0x3, +}; + +/* Packet protocol level error type enumeration */ +enum cqe_rx_err_opcode { + CQE_RX_ERR_RE_NONE = 0x0, + CQE_RX_ERR_RE_PARTIAL = 0x1, + CQE_RX_ERR_RE_JABBER = 0x2, + CQE_RX_ERR_RE_FCS = 0x7, + CQE_RX_ERR_RE_TERMINATE = 0x9, + CQE_RX_ERR_RE_RX_CTL = 0xb, + CQE_RX_ERR_PREL2_ERR = 0x1f, + CQE_RX_ERR_L2_FRAGMENT = 0x20, + CQE_RX_ERR_L2_OVERRUN = 0x21, + CQE_RX_ERR_L2_PFCS = 0x22, + CQE_RX_ERR_L2_PUNY = 0x23, + CQE_RX_ERR_L2_MAL = 0x24, + CQE_RX_ERR_L2_OVERSIZE = 0x25, + CQE_RX_ERR_L2_UNDERSIZE = 0x26, + CQE_RX_ERR_L2_LENMISM = 0x27, + CQE_RX_ERR_L2_PCLP = 0x28, + CQE_RX_ERR_IP_NOT = 0x41, + CQE_RX_ERR_IP_CHK = 0x42, + CQE_RX_ERR_IP_MAL = 0x43, + CQE_RX_ERR_IP_MALD = 0x44, + CQE_RX_ERR_IP_HOP = 0x45, + CQE_RX_ERR_L3_ICRC = 0x46, + 
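+ /* Keep these values in sync with enum CQ_RX_ERROP_E in nicvf_queues.h */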
CQE_RX_ERR_L3_PCLP = 0x47, + CQE_RX_ERR_L4_MAL = 0x61, + CQE_RX_ERR_L4_CHK = 0x62, + CQE_RX_ERR_UDP_LEN = 0x63, + CQE_RX_ERR_L4_PORT = 0x64, + CQE_RX_ERR_TCP_FLAG = 0x65, + CQE_RX_ERR_TCP_OFFSET = 0x66, + CQE_RX_ERR_L4_PCLP = 0x67, + CQE_RX_ERR_RBDR_TRUNC = 0x70, +}; + +struct cqe_rx_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 stdn_fault:1; + u64 rsvd0:1; + u64 rq_qs:7; + u64 rq_idx:3; + u64 rsvd1:12; + u64 rss_alg:4; + u64 rsvd2:4; + u64 rb_cnt:4; + u64 vlan_found:1; + u64 vlan_stripped:1; + u64 vlan2_found:1; + u64 vlan2_stripped:1; + u64 l4_type:4; + u64 l3_type:4; + u64 l2_present:1; + u64 err_level:3; + u64 err_opcode:8; + + u64 pkt_len:16; /* W1 */ + u64 l2_ptr:8; + u64 l3_ptr:8; + u64 l4_ptr:8; + u64 cq_pkt_len:8; + u64 align_pad:3; + u64 rsvd3:1; + u64 chan:12; + + u64 rss_tag:32; /* W2 */ + u64 vlan_tci:16; + u64 vlan_ptr:8; + u64 vlan2_ptr:8; + + u64 rb3_sz:16; /* W3 */ + u64 rb2_sz:16; + u64 rb1_sz:16; + u64 rb0_sz:16; + + u64 rb7_sz:16; /* W4 */ + u64 rb6_sz:16; + u64 rb5_sz:16; + u64 rb4_sz:16; + + u64 rb11_sz:16; /* W5 */ + u64 rb10_sz:16; + u64 rb9_sz:16; + u64 rb8_sz:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 err_opcode:8; + u64 err_level:3; + u64 l2_present:1; + u64 l3_type:4; + u64 l4_type:4; + u64 vlan2_stripped:1; + u64 vlan2_found:1; + u64 vlan_stripped:1; + u64 vlan_found:1; + u64 rb_cnt:4; + u64 rsvd2:4; + u64 rss_alg:4; + u64 rsvd1:12; + u64 rq_idx:3; + u64 rq_qs:7; + u64 rsvd0:1; + u64 stdn_fault:1; + u64 cqe_type:4; /* W0 */ + u64 chan:12; + u64 rsvd3:1; + u64 align_pad:3; + u64 cq_pkt_len:8; + u64 l4_ptr:8; + u64 l3_ptr:8; + u64 l2_ptr:8; + u64 pkt_len:16; /* W1 */ + u64 vlan2_ptr:8; + u64 vlan_ptr:8; + u64 vlan_tci:16; + u64 rss_tag:32; /* W2 */ + u64 rb0_sz:16; + u64 rb1_sz:16; + u64 rb2_sz:16; + u64 rb3_sz:16; /* W3 */ + u64 rb4_sz:16; + u64 rb5_sz:16; + u64 rb6_sz:16; + u64 rb7_sz:16; /* W4 */ + u64 rb8_sz:16; + u64 rb9_sz:16; + u64 rb10_sz:16; + u64 rb11_sz:16; /* W5 */ +#endif + u64 rb0_ptr:64; + u64 rb1_ptr:64; + u64 rb2_ptr:64; + u64 rb3_ptr:64; + u64 rb4_ptr:64; + u64 rb5_ptr:64; + u64 rb6_ptr:64; + u64 rb7_ptr:64; + u64 rb8_ptr:64; + u64 rb9_ptr:64; + u64 rb10_ptr:64; + u64 rb11_ptr:64; +}; + +struct cqe_rx_tcp_err_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:60; + + u64 rsvd1:4; /* W1 */ + u64 partial_first:1; + u64 rsvd2:27; + u64 rbdr_bytes:8; + u64 rsvd3:24; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 rsvd0:60; + u64 cqe_type:4; + + u64 rsvd3:24; + u64 rbdr_bytes:8; + u64 rsvd2:27; + u64 partial_first:1; + u64 rsvd1:4; +#endif +}; + +struct cqe_rx_tcp_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:52; + u64 cq_tcp_status:8; + + u64 rsvd1:32; /* W1 */ + u64 tcp_cntx_bytes:8; + u64 rsvd2:8; + u64 tcp_err_bytes:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cq_tcp_status:8; + u64 rsvd0:52; + u64 cqe_type:4; /* W0 */ + + u64 tcp_err_bytes:16; + u64 rsvd2:8; + u64 tcp_cntx_bytes:8; + u64 rsvd1:32; /* W1 */ +#endif +}; + +struct cqe_send_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:4; + u64 sqe_ptr:16; + u64 rsvd1:4; + u64 rsvd2:10; + u64 sq_qs:7; + u64 sq_idx:3; + u64 rsvd3:8; + u64 send_status:8; + + u64 ptp_timestamp:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 send_status:8; + u64 rsvd3:8; + u64 sq_idx:3; + u64 sq_qs:7; + u64 rsvd2:10; + u64 rsvd1:4; + u64 sqe_ptr:16; + u64 rsvd0:4; + u64 cqe_type:4; /* W0 */ + + u64 ptp_timestamp:64; /* W1 */ +#endif +}; + +union cq_desc_t { + u64 u[64]; + 
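+ /* 64 * 8 bytes = 512 bytes, matching CMP_QUEUE_DESC_SIZE */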
struct cqe_send_t snd_hdr; + struct cqe_rx_t rx_hdr; + struct cqe_rx_tcp_t rx_tcp_hdr; + struct cqe_rx_tcp_err_t rx_tcp_err_hdr; +}; + +struct rbdr_entry_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd0:15; + u64 buf_addr:42; + u64 cache_align:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cache_align:7; + u64 buf_addr:42; + u64 rsvd0:15; +#endif +}; + +/* TCP reassembly context */ +struct rbe_tcp_cnxt_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 tcp_pkt_cnt:12; + u64 rsvd1:4; + u64 align_hdr_bytes:4; + u64 align_ptr_bytes:4; + u64 ptr_bytes:16; + u64 rsvd2:24; + u64 cqe_type:4; + u64 rsvd0:54; + u64 tcp_end_reason:2; + u64 tcp_status:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_status:4; + u64 tcp_end_reason:2; + u64 rsvd0:54; + u64 cqe_type:4; + u64 rsvd2:24; + u64 ptr_bytes:16; + u64 align_ptr_bytes:4; + u64 align_hdr_bytes:4; + u64 rsvd1:4; + u64 tcp_pkt_cnt:12; +#endif +}; + +/* Always Big endian */ +struct rx_hdr_t { + u64 opaque:32; + u64 rss_flow:8; + u64 skip_length:6; + u64 disable_rss:1; + u64 disable_tcp_reassembly:1; + u64 nodrop:1; + u64 dest_alg:2; + u64 rsvd0:2; + u64 dest_rq:11; +}; + +enum send_l4_csum_type { + SEND_L4_CSUM_DISABLE = 0x00, + SEND_L4_CSUM_UDP = 0x01, + SEND_L4_CSUM_TCP = 0x02, + SEND_L4_CSUM_SCTP = 0x03, +}; + +enum send_crc_alg { + SEND_CRCALG_CRC32 = 0x00, + SEND_CRCALG_CRC32C = 0x01, + SEND_CRCALG_ICRC = 0x02, +}; + +enum send_load_type { + SEND_LD_TYPE_LDD = 0x00, + SEND_LD_TYPE_LDT = 0x01, + SEND_LD_TYPE_LDWB = 0x02, +}; + +enum send_mem_alg_type { + SEND_MEMALG_SET = 0x00, + SEND_MEMALG_ADD = 0x08, + SEND_MEMALG_SUB = 0x09, + SEND_MEMALG_ADDLEN = 0x0A, + SEND_MEMALG_SUBLEN = 0x0B, +}; + +enum send_mem_dsz_type { + SEND_MEMDSZ_B64 = 0x00, + SEND_MEMDSZ_B32 = 0x01, + SEND_MEMDSZ_B8 = 0x03, +}; + +enum sq_subdesc_type { + SQ_DESC_TYPE_INVALID = 0x00, + SQ_DESC_TYPE_HEADER = 0x01, + SQ_DESC_TYPE_CRC = 0x02, + SQ_DESC_TYPE_IMMEDIATE = 0x03, + SQ_DESC_TYPE_GATHER = 0x04, + SQ_DESC_TYPE_MEMORY = 0x05, +}; + +struct sq_crc_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd1:32; + u64 crc_ival:32; + u64 subdesc_type:4; + u64 crc_alg:2; + u64 rsvd0:10; + u64 crc_insert_pos:16; + u64 hdr_start:16; + u64 crc_len:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 crc_len:16; + u64 hdr_start:16; + u64 crc_insert_pos:16; + u64 rsvd0:10; + u64 crc_alg:2; + u64 subdesc_type:4; + u64 crc_ival:32; + u64 rsvd1:32; +#endif +}; + +struct sq_gather_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 ld_type:2; + u64 rsvd0:42; + u64 size:16; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 size:16; + u64 rsvd0:42; + u64 ld_type:2; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +/* SQ immediate subdescriptor */ +struct sq_imm_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 rsvd0:46; + u64 len:14; + + u64 data:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 len:14; + u64 rsvd0:46; + u64 subdesc_type:4; /* W0 */ + + u64 data:64; /* W1 */ +#endif +}; + +struct sq_mem_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 mem_alg:4; + u64 mem_dsz:2; + u64 wmem:1; + u64 rsvd0:21; + u64 offset:32; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 offset:32; + u64 rsvd0:21; + u64 wmem:1; + u64 mem_dsz:2; + u64 mem_alg:4; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +struct sq_hdr_subdesc { +#if 
defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; + u64 tso:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 dont_send:1; + u64 tstmp:1; + u64 subdesc_cnt:8; + u64 csum_l4:2; + u64 csum_l3:1; + u64 rsvd0:5; + u64 l4_offset:8; + u64 l3_offset:8; + u64 rsvd1:4; + u64 tot_len:20; /* W0 */ + + u64 tso_sdc_cont:8; + u64 tso_sdc_first:8; + u64 tso_l4_offset:8; + u64 tso_flags_last:12; + u64 tso_flags_first:12; + u64 rsvd2:2; + u64 tso_max_paysize:14; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tot_len:20; + u64 rsvd1:4; + u64 l3_offset:8; + u64 l4_offset:8; + u64 rsvd0:5; + u64 csum_l3:1; + u64 csum_l4:2; + u64 subdesc_cnt:8; + u64 tstmp:1; + u64 dont_send:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 tso:1; + u64 subdesc_type:4; /* W0 */ + + u64 tso_max_paysize:14; + u64 rsvd2:2; + u64 tso_flags_first:12; + u64 tso_flags_last:12; + u64 tso_l4_offset:8; + u64 tso_sdc_first:8; + u64 tso_sdc_cont:8; /* W1 */ +#endif +}; + +/* Queue config register formats */ +struct rq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_2_63:62; + u64 ena:1; + u64 tcp_ena:1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_ena:1; + u64 ena:1; + u64 reserved_2_63:62; +#endif +}; + +struct cq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_43_63:21; + u64 ena:1; + u64 reset:1; + u64 caching:1; + u64 reserved_35_39:5; + u64 qsize:3; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_0_15:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 reserved_0_15:16; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:3; + u64 reserved_35_39:5; + u64 caching:1; + u64 reset:1; + u64 ena:1; + u64 reserved_43_63:21; +#endif +}; + +struct sq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_20_63:44; + u64 ena:1; + u64 reserved_18_18:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_11_15:5; + u64 qsize:3; + u64 reserved_3_7:5; + u64 tstmp_bgx_intf:3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tstmp_bgx_intf:3; + u64 reserved_3_7:5; + u64 qsize:3; + u64 reserved_11_15:5; + u64 ldwb:1; + u64 reset:1; + u64 reserved_18_18:1; + u64 ena:1; + u64 reserved_20_63:44; +#endif +}; + +struct rbdr_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_45_63:19; + u64 ena:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_36_41:6; + u64 qsize:4; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_12_15:4; + u64 lines:12; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 lines:12; + u64 reserved_12_15:4; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:4; + u64 reserved_36_41:6; + u64 ldwb:1; + u64 reset:1; + u64 ena: 1; + u64 reserved_45_63:19; +#endif +}; + +struct qs_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_32_63:32; + u64 ena:1; + u64 reserved_27_30:4; + u64 sq_ins_ena:1; + u64 sq_ins_pos:6; + u64 lock_ena:1; + u64 lock_viol_cqe_ena:1; + u64 send_tstmp_ena:1; + u64 be:1; + u64 reserved_7_15:9; + u64 vnic:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 vnic:7; + u64 reserved_7_15:9; + u64 be:1; + u64 send_tstmp_ena:1; + u64 lock_viol_cqe_ena:1; + u64 lock_ena:1; + u64 sq_ins_pos:6; + u64 sq_ins_ena:1; + u64 reserved_27_30:4; + u64 ena:1; + u64 reserved_32_63:32; +#endif +}; + +#endif /* Q_STRUCT_H */ diff --git a/drivers/net/octeontx/smi.c b/drivers/net/octeontx/smi.c new file mode 100644 index 00000000000..8e2c3ca5a30 --- /dev/null +++ b/drivers/net/octeontx/smi.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. 
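+ *
+ * MDIO (SMI) bus driver for OcteonTX, supporting both Clause 22 and
+ * Clause 45 PHY register accesses.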
+ */ + +#include <dm.h> +#include <malloc.h> +#include <miiphy.h> +#include <misc.h> +#include <pci.h> +#include <pci_ids.h> +#include <phy.h> +#include <asm/io.h> +#include <linux/ctype.h> +#include <linux/delay.h> + +#define PCI_DEVICE_ID_OCTEONTX_SMI 0xA02B + +DECLARE_GLOBAL_DATA_PTR; + +enum octeontx_smi_mode { + CLAUSE22 = 0, + CLAUSE45 = 1, +}; + +enum { + SMI_OP_C22_WRITE = 0, + SMI_OP_C22_READ = 1, + + SMI_OP_C45_ADDR = 0, + SMI_OP_C45_WRITE = 1, + SMI_OP_C45_PRIA = 2, + SMI_OP_C45_READ = 3, +}; + +union smi_x_clk { + u64 u; + struct smi_x_clk_s { + int phase:8; + int sample:4; + int preamble:1; + int clk_idle:1; + int reserved_14_14:1; + int sample_mode:1; + int sample_hi:5; + int reserved_21_23:3; + int mode:1; + } s; +}; + +union smi_x_cmd { + u64 u; + struct smi_x_cmd_s { + int reg_adr:5; + int reserved_5_7:3; + int phy_adr:5; + int reserved_13_15:3; + int phy_op:2; + } s; +}; + +union smi_x_wr_dat { + u64 u; + struct smi_x_wr_dat_s { + unsigned int dat:16; + int val:1; + int pending:1; + } s; +}; + +union smi_x_rd_dat { + u64 u; + struct smi_x_rd_dat_s { + unsigned int dat:16; + int val:1; + int pending:1; + } s; +}; + +union smi_x_en { + u64 u; + struct smi_x_en_s { + int en:1; + } s; +}; + +#define SMI_X_RD_DAT 0x10ull +#define SMI_X_WR_DAT 0x08ull +#define SMI_X_CMD 0x00ull +#define SMI_X_CLK 0x18ull +#define SMI_X_EN 0x20ull + +struct octeontx_smi_priv { + void __iomem *baseaddr; + enum octeontx_smi_mode mode; +}; + +#define MDIO_TIMEOUT 10000 + +void octeontx_smi_setmode(struct mii_dev *bus, enum octeontx_smi_mode mode) +{ + struct octeontx_smi_priv *priv = bus->priv; + union smi_x_clk smix_clk; + + smix_clk.u = readq(priv->baseaddr + SMI_X_CLK); + smix_clk.s.mode = mode; + smix_clk.s.preamble = mode == CLAUSE45; + writeq(smix_clk.u, priv->baseaddr + SMI_X_CLK); + + priv->mode = mode; +} + +int octeontx_c45_addr(struct mii_dev *bus, int addr, int devad, int regnum) +{ + struct octeontx_smi_priv *priv = bus->priv; + + union smi_x_cmd smix_cmd; + union smi_x_wr_dat smix_wr_dat; + unsigned long timeout = MDIO_TIMEOUT; + + smix_wr_dat.u = 0; + smix_wr_dat.s.dat = regnum; + + writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT); + + smix_cmd.u = 0; + smix_cmd.s.phy_op = SMI_OP_C45_ADDR; + smix_cmd.s.phy_adr = addr; + smix_cmd.s.reg_adr = devad; + + writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD); + + do { + smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT); + udelay(100); + timeout--; + } while (smix_wr_dat.s.pending && timeout); + + return timeout == 0; +} + +int octeontx_phy_read(struct mii_dev *bus, int addr, int devad, int regnum) +{ + struct octeontx_smi_priv *priv = bus->priv; + union smi_x_cmd smix_cmd; + union smi_x_rd_dat smix_rd_dat; + unsigned long timeout = MDIO_TIMEOUT; + int ret; + + enum octeontx_smi_mode mode = (devad < 0) ? 
CLAUSE22 : CLAUSE45;
+
+ debug("RD: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+ mode, priv->baseaddr, addr, devad, regnum);
+
+ octeontx_smi_setmode(bus, mode);
+
+ if (mode == CLAUSE45) {
+ ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+ debug("RD: ret: %u\n", ret);
+
+ if (ret)
+ return 0;
+ }
+
+ smix_cmd.u = 0;
+ smix_cmd.s.phy_adr = addr;
+
+ if (mode == CLAUSE45) {
+ smix_cmd.s.reg_adr = devad;
+ smix_cmd.s.phy_op = SMI_OP_C45_READ;
+ } else {
+ smix_cmd.s.reg_adr = regnum;
+ smix_cmd.s.phy_op = SMI_OP_C22_READ;
+ }
+
+ writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+ do {
+ smix_rd_dat.u = readq(priv->baseaddr + SMI_X_RD_DAT);
+ udelay(10);
+ timeout--;
+ } while (smix_rd_dat.s.pending && timeout);
+
+ debug("SMIX_RD_DAT: %lx\n", (unsigned long)smix_rd_dat.u);
+
+ return smix_rd_dat.s.dat;
+}
+
+int octeontx_phy_write(struct mii_dev *bus, int addr, int devad, int regnum,
+ u16 value)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+ union smi_x_cmd smix_cmd;
+ union smi_x_wr_dat smix_wr_dat;
+ unsigned long timeout = MDIO_TIMEOUT;
+ int ret;
+
+ enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
+
+ debug("WR: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+ mode, priv->baseaddr, addr, devad, regnum);
+
+ if (mode == CLAUSE45) {
+ ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+ debug("WR: ret: %u\n", ret);
+
+ if (ret)
+ return ret;
+ }
+
+ smix_wr_dat.u = 0;
+ smix_wr_dat.s.dat = value;
+
+ writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
+
+ smix_cmd.u = 0;
+ smix_cmd.s.phy_adr = addr;
+
+ if (mode == CLAUSE45) {
+ smix_cmd.s.reg_adr = devad;
+ smix_cmd.s.phy_op = SMI_OP_C45_WRITE;
+ } else {
+ smix_cmd.s.reg_adr = regnum;
+ smix_cmd.s.phy_op = SMI_OP_C22_WRITE;
+ }
+
+ writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+ do {
+ smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
+ udelay(10);
+ timeout--;
+ } while (smix_wr_dat.s.pending && timeout);
+
+ debug("SMIX_WR_DAT: %lx\n", (unsigned long)smix_wr_dat.u);
+
+ return timeout == 0;
+}
+
+int octeontx_smi_reset(struct mii_dev *bus)
+{
+ struct octeontx_smi_priv *priv = bus->priv;
+
+ union smi_x_en smi_en;
+
+ smi_en.s.en = 0;
+ writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+ smi_en.s.en = 1;
+ writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+ octeontx_smi_setmode(bus, CLAUSE22);
+
+ return 0;
+}
+
+/* PHY XS initialization, primarily for RXAUI */
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr)
+{
+ int reg;
+ ulong start_time;
+ int phy_id1, phy_id2;
+ int oui, model_number;
+
+ phy_id1 = octeontx_phy_read(bus, phy_addr, 1, 0x2);
+ phy_id2 = octeontx_phy_read(bus, phy_addr, 1, 0x3);
+ model_number = (phy_id2 >> 4) & 0x3F;
+ debug("%s model %x\n", __func__, model_number);
+ oui = phy_id1;
+ oui <<= 6;
+ oui |= (phy_id2 >> 10) & 0x3F;
+ debug("%s oui %x\n", __func__, oui);
+ switch (oui) {
+ case 0x5016:
+ if (model_number == 9) {
+ debug("%s +\n", __func__);
+ /* Perform hardware reset in XGXS control */
+ reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+ if (reg < 0)
+ goto read_error;
+ reg |= 0x8000;
+ octeontx_phy_write(bus, phy_addr, 4, 0x0, reg);
+
+ start_time = get_timer(0);
+ do {
+ reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+ if (reg < 0)
+ goto read_error;
+ } while ((reg & 0x8000) && get_timer(start_time) < 500);
+ if (reg & 0x8000) {
+ printf("HW reset for M88X3120 PHY failed\n");
+ printf("MII_BMCR: 0x%x\n", reg);
+ return -1;
+ }
+ /* program 4.49155 with 0x5 */
+ octeontx_phy_write(bus, phy_addr, 4, 0xc003,
0x5); + } + break; + default: + break; + } + + return 0; + +read_error: + debug("M88X3120 PHY config read failed\n"); + return -1; +} + +int octeontx_smi_probe(struct udevice *dev) +{ + int ret, subnode, cnt = 0, node = dev->node.of_offset; + struct mii_dev *bus; + struct octeontx_smi_priv *priv; + pci_dev_t bdf = dm_pci_get_bdf(dev); + + debug("SMI PCI device: %x\n", bdf); + dev->req_seq = PCI_FUNC(bdf); + if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) { + printf("Failed to map PCI region for bdf %x\n", bdf); + return -1; + } + + fdt_for_each_subnode(subnode, gd->fdt_blob, node) { + ret = fdt_node_check_compatible(gd->fdt_blob, subnode, + "cavium,thunder-8890-mdio"); + if (ret) + continue; + + bus = mdio_alloc(); + priv = malloc(sizeof(*priv)); + if (!bus || !priv) { + printf("Failed to allocate OcteonTX MDIO bus # %u\n", + dev->seq); + return -1; + } + + bus->read = octeontx_phy_read; + bus->write = octeontx_phy_write; + bus->reset = octeontx_smi_reset; + bus->priv = priv; + + priv->mode = CLAUSE22; + priv->baseaddr = (void __iomem *)fdtdec_get_addr(gd->fdt_blob, + subnode, + "reg"); + debug("mdio base addr %p\n", priv->baseaddr); + + /* use given name or generate its own unique name */ + snprintf(bus->name, MDIO_NAME_LEN, "smi%d", cnt++); + + ret = mdio_register(bus); + if (ret) + return ret; + } + return 0; +} + +static const struct udevice_id octeontx_smi_ids[] = { + { .compatible = "cavium,thunder-8890-mdio-nexus" }, + {} +}; + +U_BOOT_DRIVER(octeontx_smi) = { + .name = "octeontx_smi", + .id = UCLASS_MISC, + .probe = octeontx_smi_probe, + .of_match = octeontx_smi_ids, +}; + +static struct pci_device_id octeontx_smi_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_SMI) }, + {} +}; + +U_BOOT_PCI_DEVICE(octeontx_smi, octeontx_smi_supported); diff --git a/drivers/net/octeontx/xcv.c b/drivers/net/octeontx/xcv.c new file mode 100644 index 00000000000..8dd558b1bf2 --- /dev/null +++ b/drivers/net/octeontx/xcv.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + */ + +#include <config.h> +#include <dm.h> +#include <errno.h> +#include <fdt_support.h> +#include <pci.h> +#include <malloc.h> +#include <miiphy.h> +#include <misc.h> +#include <net.h> +#include <netdev.h> +#include <asm/io.h> +#include <linux/delay.h> +#include <linux/libfdt.h> + +#include <asm/arch/csrs/csrs-xcv.h> + +#define XCVX_BASE 0x87E0DB000000ULL + +/* Initialize XCV block */ +void xcv_init_hw(void) +{ + union xcvx_reset reset; + union xcvx_dll_ctl xcv_dll_ctl; + + /* Take the DLL out of reset */ + reset.u = readq(XCVX_BASE + XCVX_RESET(0)); + reset.s.dllrst = 0; + writeq(reset.u, XCVX_BASE + XCVX_RESET(0)); + + /* Take the clock tree out of reset */ + reset.u = readq(XCVX_BASE + XCVX_RESET(0)); + reset.s.clkrst = 0; + writeq(reset.u, XCVX_BASE + XCVX_RESET(0)); + + /* Once the 125MHz ref clock is stable, wait 10us for DLL to lock */ + udelay(10); + + /* Optionally, bypass the DLL setting */ + xcv_dll_ctl.u = readq(XCVX_BASE + XCVX_DLL_CTL(0)); + xcv_dll_ctl.s.clkrx_set = 0; + xcv_dll_ctl.s.clkrx_byp = 1; + xcv_dll_ctl.s.clktx_byp = 0; + writeq(xcv_dll_ctl.u, XCVX_BASE + XCVX_DLL_CTL(0)); + + /* Enable the compensation controller */ + reset.u = readq(XCVX_BASE + XCVX_RESET(0)); + reset.s.comp = 1; + writeq(reset.u, XCVX_BASE + XCVX_RESET(0)); + reset.u = readq(XCVX_BASE + XCVX_RESET(0)); + + /* Wait for 1040 reference clock cycles for the compensation state + * machine lock. 
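+ * Assuming the same 125 MHz reference clock as above, 1040 cycles is
+ * roughly 8.3 us; the 100 us delay below leaves a wide margin.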
+ */
+ udelay(100);
+
+ /* Enable the XCV block */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.enable = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* Set XCV(0)_RESET[CLKRST] to 1 */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.clkrst = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+}
+
+/*
+ * Configure the XCV link based on the speed
+ * link_up : Set to 1 when the link is up, otherwise 0
+ * link_speed: The speed of the link.
+ */
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ union xcvx_ctl xcv_ctl;
+ union xcvx_reset reset;
+ union xcvx_batch_crd_ret xcv_crd_ret;
+ int speed = 2;
+
+ /* Map the RGMII link speed (0 = 10M, 1 = 100M, 2 = 1G) */
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* Set operating speed */
+ xcv_ctl.u = readq(XCVX_BASE + XCVX_CTL(0));
+ xcv_ctl.s.speed = speed;
+ writeq(xcv_ctl.u, XCVX_BASE + XCVX_CTL(0));
+
+ /* Datapaths come out of reset
+ * - The datapath resets will disengage BGX from the
+ * RGMII interface
+ * - XCV will continue to return TX credits for each tick
+ * that is sent on the TX data path
+ */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_dat_rst_n = 1;
+ reset.s.rx_dat_rst_n = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ /* Enable packet flow */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_pkt_rst_n = 1;
+ reset.s.rx_pkt_rst_n = 1;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+ xcv_crd_ret.u = readq(XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+ xcv_crd_ret.s.crd_ret = 1;
+ writeq(xcv_crd_ret.u, XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+ } else {
+ /* Disable packet flow */
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ reset.s.tx_pkt_rst_n = 0;
+ reset.s.rx_pkt_rst_n = 0;
+ writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+ reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+ }
+}
diff --git a/drivers/net/octeontx2/Makefile b/drivers/net/octeontx2/Makefile
new file mode 100644
index 00000000000..c9300727aef
--- /dev/null
+++ b/drivers/net/octeontx2/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEONTX2) += cgx.o nix_af.o nix.o rvu_pf.o \
+ rvu_af.o rvu_common.o
+
diff --git a/drivers/net/octeontx2/cgx.c b/drivers/net/octeontx2/cgx.c
new file mode 100644
index 00000000000..ff2ebc25ce1
--- /dev/null
+++ b/drivers/net/octeontx2/cgx.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <linux/list.h>
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-cgx.h>
+#include <asm/io.h>
+
+#include "cgx.h"
+
+char lmac_type_to_str[][8] = {
+ "SGMII",
+ "XAUI",
+ "RXAUI",
+ "10G_R",
+ "40G_R",
+ "RGMII",
+ "QSGMII",
+ "25G_R",
+ "50G_R",
+ "100G_R",
+ "USXGMII",
+};
+
+char lmac_speed_to_str[][8] = {
+ "0",
+ "10M",
+ "100M",
+ "1G",
+ "2.5G",
+ "5G",
+ "10G",
+ "20G",
+ "25G",
+ "40G",
+ "50G",
+ "80G",
+ "100G",
+};
+
+/**
+ * Given an LMAC/PF instance number, return the lmac
+ * Per design, each PF has only one LMAC mapped.
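+ * Instances are assigned sequentially from 1 by cgx_lmac_init(), so at
+ * most CGX_PER_NODE * MAX_LMAC_PER_CGX (3 * 4 = 12) instances exist per
+ * node.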
+ * + * @param instance instance to find + * + * @return pointer to lmac data structure or NULL if not found + */ +struct lmac *nix_get_cgx_lmac(int lmac_instance) +{ + struct cgx *cgx; + struct udevice *dev; + int i, idx, err; + + for (i = 0; i < CGX_PER_NODE; i++) { + err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX2_CGX, i, + &dev); + if (err) + continue; + + cgx = dev_get_priv(dev); + debug("%s udev %p cgx %p instance %d\n", __func__, dev, cgx, + lmac_instance); + for (idx = 0; idx < cgx->lmac_count; idx++) { + if (cgx->lmac[idx]->instance == lmac_instance) + return cgx->lmac[idx]; + } + } + return NULL; +} + +void cgx_lmac_mac_filter_clear(struct lmac *lmac) +{ + union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0; + union cgxx_cmr_rx_dmacx_cam0 dmac_cam0; + void *reg_addr; + + dmac_cam0.u = 0x0; + reg_addr = lmac->cgx->reg_base + + CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8); + writeq(dmac_cam0.u, reg_addr); + debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u); + + dmac_ctl0.u = 0x0; + dmac_ctl0.s.bcst_accept = 1; + dmac_ctl0.s.mcst_mode = 1; + dmac_ctl0.s.cam_accept = 0; + reg_addr = lmac->cgx->reg_base + + CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id); + writeq(dmac_ctl0.u, reg_addr); + debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u); +} + +void cgx_lmac_mac_filter_setup(struct lmac *lmac) +{ + union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0; + union cgxx_cmr_rx_dmacx_cam0 dmac_cam0; + u64 mac, tmp; + void *reg_addr; + + memcpy((void *)&tmp, lmac->mac_addr, 6); + debug("%s: tmp %llx\n", __func__, tmp); + debug("%s: swab tmp %llx\n", __func__, swab64(tmp)); + mac = swab64(tmp) >> 16; + debug("%s: mac %llx\n", __func__, mac); + dmac_cam0.u = 0x0; + dmac_cam0.s.id = lmac->lmac_id; + dmac_cam0.s.adr = mac; + dmac_cam0.s.en = 1; + reg_addr = lmac->cgx->reg_base + + CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8); + writeq(dmac_cam0.u, reg_addr); + debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u); + dmac_ctl0.u = 0x0; + dmac_ctl0.s.bcst_accept = 1; + dmac_ctl0.s.mcst_mode = 0; + dmac_ctl0.s.cam_accept = 1; + reg_addr = lmac->cgx->reg_base + + CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id); + writeq(dmac_ctl0.u, reg_addr); + debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u); +} + +int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind) +{ + cgx_write(lmac->cgx, lmac_id, CGXX_CMRX_RX_ID_MAP(0), + (pkind & 0x3f)); + return 0; +} + +int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status) +{ + int ret = 0; + + ret = cgx_intf_get_link_sts(lmac->cgx->cgx_id, lmac_id, status); + if (ret) { + debug("%s request failed for cgx%d lmac%d\n", + __func__, lmac->cgx->cgx_id, lmac->lmac_id); + ret = -1; + } + return ret; +} + +int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable) +{ + struct cgx *cgx = lmac->cgx; + union cgxx_cmrx_config cmrx_config; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cmrx_config.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0)); + cmrx_config.s.data_pkt_rx_en = + cmrx_config.s.data_pkt_tx_en = enable ? 
1 : 0; + cgx_write(cgx, lmac_id, CGXX_CMRX_CONFIG(0), cmrx_config.u); + return 0; +} + +int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable, + u64 *status) +{ + int ret = 0; + + ret = cgx_intf_link_up_dwn(lmac->cgx->cgx_id, lmac_id, enable, + status); + if (ret) { + debug("%s request failed for cgx%d lmac%d\n", + __func__, lmac->cgx->cgx_id, lmac->lmac_id); + ret = -1; + } + return ret; +} + +int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable) +{ + struct cgx *cgx = lmac->cgx; + union cgxx_cmrx_config cmrx_cfg; + union cgxx_gmp_pcs_mrx_control mrx_control; + union cgxx_spux_control1 spux_control1; + enum lmac_type lmac_type; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cmrx_cfg.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0)); + lmac_type = cmrx_cfg.s.lmac_type; + if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { + mrx_control.u = cgx_read(cgx, lmac_id, + CGXX_GMP_PCS_MRX_CONTROL(0)); + mrx_control.s.loopbck1 = enable ? 1 : 0; + cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CONTROL(0), + mrx_control.u); + } else { + spux_control1.u = cgx_read(cgx, lmac_id, + CGXX_SPUX_CONTROL1(0)); + spux_control1.s.loopbck = enable ? 1 : 0; + cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1(0), + spux_control1.u); + } + return 0; +} + +static int cgx_lmac_init(struct cgx *cgx) +{ + struct lmac *lmac; + union cgxx_cmrx_config cmrx_cfg; + static int instance = 1; + int i; + + cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMR_RX_LMACS()); + debug("%s: Found %d lmacs for cgx %d@%p\n", __func__, cgx->lmac_count, + cgx->cgx_id, cgx->reg_base); + + for (i = 0; i < cgx->lmac_count; i++) { + lmac = calloc(1, sizeof(*lmac)); + if (!lmac) + return -ENOMEM; + lmac->instance = instance++; + snprintf(lmac->name, sizeof(lmac->name), "cgx_fwi_%d_%d", + cgx->cgx_id, i); + /* Get LMAC type */ + cmrx_cfg.u = cgx_read(cgx, i, CGXX_CMRX_CONFIG(0)); + lmac->lmac_type = cmrx_cfg.s.lmac_type; + + lmac->lmac_id = i; + lmac->cgx = cgx; + cgx->lmac[i] = lmac; + debug("%s: map id %d to lmac %p (%s), type:%d instance %d\n", + __func__, i, lmac, lmac->name, lmac->lmac_type, + lmac->instance); + lmac->init_pend = 1; + printf("CGX%d LMAC%d [%s]\n", lmac->cgx->cgx_id, + lmac->lmac_id, lmac_type_to_str[lmac->lmac_type]); + octeontx2_board_get_mac_addr((lmac->instance - 1), + lmac->mac_addr); + debug("%s: MAC %pM\n", __func__, lmac->mac_addr); + cgx_lmac_mac_filter_setup(lmac); + } + return 0; +} + +int cgx_probe(struct udevice *dev) +{ + struct cgx *cgx = dev_get_priv(dev); + int err; + + cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, + PCI_REGION_MEM); + cgx->dev = dev; + cgx->cgx_id = ((u64)(cgx->reg_base) >> 24) & 0x7; + + debug("%s CGX BAR %p, id: %d\n", __func__, cgx->reg_base, + cgx->cgx_id); + debug("%s CGX %p, udev: %p\n", __func__, cgx, dev); + + err = cgx_lmac_init(cgx); + + return err; +} + +int cgx_remove(struct udevice *dev) +{ + struct cgx *cgx = dev_get_priv(dev); + int i; + + debug("%s: cgx remove reg_base %p cgx_id %d", + __func__, cgx->reg_base, cgx->cgx_id); + for (i = 0; i < cgx->lmac_count; i++) + cgx_lmac_mac_filter_clear(cgx->lmac[i]); + + return 0; +} + +U_BOOT_DRIVER(cgx) = { + .name = "cgx", + .id = UCLASS_MISC, + .probe = cgx_probe, + .remove = cgx_remove, + .priv_auto_alloc_size = sizeof(struct cgx), +}; + +static struct pci_device_id cgx_supported[] = { + {PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_CGX) }, + {} +}; + +U_BOOT_PCI_DEVICE(cgx, cgx_supported); diff --git a/drivers/net/octeontx2/cgx.h b/drivers/net/octeontx2/cgx.h new file mode 
100644 index 00000000000..f287692712c --- /dev/null +++ b/drivers/net/octeontx2/cgx.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef __CGX_H__ +#define __CGX_H__ + +#include "cgx_intf.h" + +#define PCI_DEVICE_ID_OCTEONTX2_CGX 0xA059 + +#define MAX_LMAC_PER_CGX 4 +#define CGX_PER_NODE 3 + +enum lmac_type { + LMAC_MODE_SGMII = 0, + LMAC_MODE_XAUI = 1, + LMAC_MODE_RXAUI = 2, + LMAC_MODE_10G_R = 3, + LMAC_MODE_40G_R = 4, + LMAC_MODE_QSGMII = 6, + LMAC_MODE_25G_R = 7, + LMAC_MODE_50G_R = 8, + LMAC_MODE_100G_R = 9, + LMAC_MODE_USXGMII = 10, +}; + +extern char lmac_type_to_str[][8]; + +extern char lmac_speed_to_str[][8]; + +struct lmac_priv { + u8 enable:1; + u8 full_duplex:1; + u8 speed:4; + u8 mode:1; + u8 rsvd:1; + u8 mac_addr[6]; +}; + +struct cgx; +struct nix; +struct nix_af; + +struct lmac { + struct cgx *cgx; + struct nix *nix; + char name[16]; + enum lmac_type lmac_type; + bool init_pend; + u8 instance; + u8 lmac_id; + u8 pknd; + u8 link_num; + u32 chan_num; + u8 mac_addr[6]; +}; + +struct cgx { + struct nix_af *nix_af; + void __iomem *reg_base; + struct udevice *dev; + struct lmac *lmac[MAX_LMAC_PER_CGX]; + u8 cgx_id; + u8 lmac_count; +}; + +static inline void cgx_write(struct cgx *cgx, u8 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + CMR_SHIFT(lmac) + offset); +} + +static inline u64 cgx_read(struct cgx *cgx, u8 lmac, u64 offset) +{ + return readq(cgx->reg_base + CMR_SHIFT(lmac) + offset); +} + +/** + * Given an LMAC/PF instance number, return the lmac + * Per design, each PF has only one LMAC mapped. + * + * @param instance instance to find + * + * @return pointer to lmac data structure or NULL if not found + */ +struct lmac *nix_get_cgx_lmac(int lmac_instance); + +int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind); +int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable); +int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable); +int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable, + u64 *status); +int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status); +void cgx_lmac_mac_filter_setup(struct lmac *lmac); + +int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts); +int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts); +int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac); +int cgx_intf_set_macaddr(struct udevice *dev); +int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane); +int cgx_intf_display_eye(u8 qlm, u8 lane); +int cgx_intf_display_serdes(u8 qlm, u8 lane); + +#endif /* __CGX_H__ */ diff --git a/drivers/net/octeontx2/cgx_intf.c b/drivers/net/octeontx2/cgx_intf.c new file mode 100644 index 00000000000..37d9a2bb730 --- /dev/null +++ b/drivers/net/octeontx2/cgx_intf.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + */ + +#include <dm.h> +#include <errno.h> +#include <malloc.h> +#include <misc.h> +#include <net.h> + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/list.h> + +#include <asm/arch/board.h> +#include <asm/io.h> + +#include "cgx_intf.h" +#include "cgx.h" +#include "nix.h" + +static u64 cgx_rd_scrx(u8 cgx, u8 lmac, u8 index) +{ + u64 addr; + + addr = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0; + addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac); + return readq(addr); +} + +static void cgx_wr_scrx(u8 cgx, u8 lmac, u8 index, u64 val) +{ + u64 addr; + + addr = (index == 1) ? 
CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
+ addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac);
+ writeq(val, addr);
+}
+
+static u64 cgx_rd_scr0(u8 cgx, u8 lmac)
+{
+ return cgx_rd_scrx(cgx, lmac, 0);
+}
+
+static u64 cgx_rd_scr1(u8 cgx, u8 lmac)
+{
+ return cgx_rd_scrx(cgx, lmac, 1);
+}
+
+static void cgx_wr_scr0(u8 cgx, u8 lmac, u64 val)
+{
+ cgx_wr_scrx(cgx, lmac, 0, val);
+}
+
+static void cgx_wr_scr1(u8 cgx, u8 lmac, u64 val)
+{
+ cgx_wr_scrx(cgx, lmac, 1, val);
+}
+
+static inline void set_ownership(u8 cgx, u8 lmac, u8 val)
+{
+ union cgx_scratchx1 scr1;
+
+ scr1.u = cgx_rd_scr1(cgx, lmac);
+ scr1.s.own_status = val;
+ cgx_wr_scr1(cgx, lmac, scr1.u);
+}
+
+static int wait_for_ownership(u8 cgx, u8 lmac)
+{
+ union cgx_scratchx1 scr1;
+ union cgx_scratchx0 scr0;
+ u64 cmrx_int;
+ int timeout = 5000;
+
+ do {
+ scr1.u = cgx_rd_scr1(cgx, lmac);
+ scr0.u = cgx_rd_scr0(cgx, lmac);
+ /* clear async events if any */
+ if (scr0.s.evt_sts.evt_type == CGX_EVT_ASYNC &&
+ scr0.s.evt_sts.ack) {
+ /* clear interrupt */
+ cmrx_int = readq(CGX_CMR_INT +
+ CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+ cmrx_int |= 0x2; // Overflow bit
+ writeq(cmrx_int, CGX_CMR_INT +
+ CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+
+ /* clear ack */
+ scr0.s.evt_sts.ack = 0;
+ cgx_wr_scr0(cgx, lmac, scr0.u);
+ }
+
+ if (timeout-- < 0) {
+ debug("timeout waiting for ownership\n");
+ return -ETIMEDOUT;
+ }
+ mdelay(1);
+ } while ((scr1.s.own_status == CGX_OWN_FIRMWARE) &&
+ scr0.s.evt_sts.ack);
+
+ return 0;
+}
+
+int cgx_intf_req(u8 cgx, u8 lmac, union cgx_cmd_s cmd_args, u64 *rsp,
+ int use_cmd_id_only)
+{
+ union cgx_scratchx1 scr1;
+ union cgx_scratchx0 scr0;
+ u64 cmrx_int;
+ int timeout = 500;
+ int err = 0;
+ u8 cmd = cmd_args.cmd.id;
+
+ if (wait_for_ownership(cgx, lmac)) {
+ err = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* send command */
+ scr1.u = cgx_rd_scr1(cgx, lmac);
+
+ if (use_cmd_id_only) {
+ scr1.s.cmd.id = cmd;
+ } else {
+ cmd_args.own_status = scr1.s.own_status;
+ scr1.s = cmd_args;
+ }
+ cgx_wr_scr1(cgx, lmac, scr1.u);
+
+ set_ownership(cgx, lmac, CGX_OWN_FIRMWARE);
+
+ /* wait for response and ownership */
+ do {
+ scr0.u = cgx_rd_scr0(cgx, lmac);
+ scr1.u = cgx_rd_scr1(cgx, lmac);
+ mdelay(10);
+ } while (timeout-- && (!scr0.s.evt_sts.ack) &&
+ (scr1.s.own_status == CGX_OWN_FIRMWARE));
+ if (timeout < 0) {
+ debug("%s timeout waiting for ack\n", __func__);
+ err = -ETIMEDOUT;
+ goto error;
+ }
+
+ if (cmd == CGX_CMD_INTF_SHUTDOWN)
+ goto error;
+
+ if (scr0.s.evt_sts.evt_type != CGX_EVT_CMD_RESP) {
+ debug("%s received async event instead of cmd resp event\n",
+ __func__);
+ err = -1;
+ goto error;
+ }
+ if (scr0.s.evt_sts.id != cmd) {
+ debug("%s received resp for cmd %d expected cmd %d\n",
+ __func__, scr0.s.evt_sts.id, cmd);
+ err = -1;
+ goto error;
+ }
+ if (scr0.s.evt_sts.stat != CGX_STAT_SUCCESS) {
+ debug("%s cmd%d failed on cgx%u lmac%u with errcode %d\n",
+ __func__, cmd, cgx, lmac, scr0.s.link_sts.err_type);
+ err = -1;
+ }
+
+error:
+ /* clear interrupt */
+ cmrx_int = readq(CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+ cmrx_int |= 0x2; // Overflow bit
+ writeq(cmrx_int, CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+
+ /* clear ownership and ack */
+ scr0.s.evt_sts.ack = 0;
+ cgx_wr_scr0(cgx, lmac, scr0.u);
+
+ *rsp = err ?
0 : scr0.u; + + return err; +} + +int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_GET_MAC_ADDR; + + ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1); + if (ret) + return -1; + + scr0.u >>= 9; + memcpy(mac, &scr0.u, 6); + + return 0; +} + +int cgx_intf_get_ver(u8 cgx, u8 lmac, u8 *ver) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_GET_FW_VER; + + ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1); + if (ret) + return -1; + + scr0.u >>= 9; + *ver = scr0.u & 0xFFFF; + + return 0; +} + +int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_GET_LINK_STS; + + ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1); + if (ret) + return -1; + + scr0.u >>= 9; + /* pass the same format as cgx_lnk_sts_s + * err_type:10, speed:4, full_duplex:1, link_up:1 + */ + *lnk_sts = scr0.u & 0xFFFF; + return 0; +} + +int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = up_dwn ? CGX_CMD_LINK_BRING_UP : CGX_CMD_LINK_BRING_DOWN; + + ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1); + if (ret) + return -1; + + scr0.u >>= 9; + /* pass the same format as cgx_lnk_sts_s + * err_type:10, speed:4, full_duplex:1, link_up:1 + */ + *lnk_sts = scr0.u & 0xFFFF; + return 0; +} + +void cgx_intf_shutdown(void) +{ + union cgx_scratchx0 scr0; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_INTF_SHUTDOWN; + + cgx_intf_req(0, 0, cmd, &scr0.u, 1); +} + +int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_PRBS; + + cmd.prbs_args.qlm = qlm; + cmd.prbs_args.mode = mode; + cmd.prbs_args.time = time; + cmd.prbs_args.lane = lane; + + ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0); + if (ret) + return -1; + + return 0; +} + +enum cgx_mode { + MODE_10G_C2C, + MODE_10G_C2M, + MODE_10G_KR, + MODE_25G_C2C, + MODE_25G_2_C2C, + MODE_50G_C2C, + MODE_50G_4_C2C +}; + +static char intf_speed_to_str[][8] = { + "10M", + "100M", + "1G", + "2.5G", + "5G", + "10G", + "20G", + "25G", + "40G", + "50G", + "80G", + "100G", +}; + +static void mode_to_args(int mode, struct cgx_mode_change_args *args) +{ + args->an = 0; + args->duplex = 0; + args->port = 0; + + switch (mode) { + case MODE_10G_C2C: + args->speed = CGX_LINK_10G; + args->mode = BIT_ULL(CGX_MODE_10G_C2C_BIT); + break; + case MODE_10G_C2M: + args->speed = CGX_LINK_10G; + args->mode = BIT_ULL(CGX_MODE_10G_C2M_BIT); + break; + case MODE_10G_KR: + args->speed = CGX_LINK_10G; + args->mode = BIT_ULL(CGX_MODE_10G_KR_BIT); + args->an = 1; + break; + case MODE_25G_C2C: + args->speed = CGX_LINK_25G; + args->mode = BIT_ULL(CGX_MODE_25G_C2C_BIT); + break; + case MODE_25G_2_C2C: + args->speed = CGX_LINK_25G; + args->mode = BIT_ULL(CGX_MODE_25G_2_C2C_BIT); + break; + case MODE_50G_C2C: + args->speed = CGX_LINK_50G; + args->mode = BIT_ULL(CGX_MODE_50G_C2C_BIT); + break; + case MODE_50G_4_C2C: + args->speed = CGX_LINK_50G; + args->mode = BIT_ULL(CGX_MODE_50G_4_C2C_BIT); + } +} + +int cgx_intf_set_mode(struct udevice *ethdev, int mode) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_MODE_CHANGE; + + mode_to_args(mode, &cmd.mode_change_args); + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + 
cmd, &scr0.u, 0); + if (ret) { + printf("Mode change command failed for %s\n", ethdev->name); + return -1; + } + + cmd.cmd.id = CGX_CMD_GET_LINK_STS; + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 1); + if (ret) { + printf("Get Link Status failed for %s\n", ethdev->name); + return -1; + } + + printf("Current Link Status: "); + if (scr0.s.link_sts.speed) { + printf("%s\n", intf_speed_to_str[scr0.s.link_sts.speed]); + switch (scr0.s.link_sts.fec) { + case 0: + printf("FEC_NONE\n"); + break; + case 1: + printf("FEC_BASE_R\n"); + break; + case 2: + printf("FEC_RS\n"); + break; + } + printf("Auto Negotiation %sabled\n", + scr0.s.link_sts.an ? "En" : "Dis"); + printf("%s Duplex\n", + scr0.s.link_sts.full_duplex ? "Full" : "Half"); + } else { + printf("Down\n"); + } + return 0; +} + +int cgx_intf_get_mode(struct udevice *ethdev) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_GET_LINK_STS; + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 1); + if (ret) { + printf("Get link status failed for %s\n", ethdev->name); + return -1; + } + printf("Current Interface Mode: "); + switch (scr0.s.link_sts.mode) { + case CGX_MODE_10G_C2C_BIT: + printf("10G_C2C\n"); + break; + case CGX_MODE_10G_C2M_BIT: + printf("10G_C2M\n"); + break; + case CGX_MODE_10G_KR_BIT: + printf("10G_KR\n"); + break; + case CGX_MODE_25G_C2C_BIT: + printf("25G_C2C\n"); + break; + case CGX_MODE_25G_2_C2C_BIT: + printf("25G_2_C2C\n"); + break; + case CGX_MODE_50G_C2C_BIT: + printf("50G_C2C\n"); + break; + case CGX_MODE_50G_4_C2C_BIT: + printf("50G_4_C2C\n"); + break; + default: + printf("Unknown\n"); + break; + } + return 0; +} + +int cgx_intf_get_fec(struct udevice *ethdev) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_GET_SUPPORTED_FEC; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 1); + if (ret) { + printf("Get supported FEC failed for %s\n", ethdev->name); + return -1; + } + + printf("Supported FEC type: "); + switch (scr0.s.supported_fec.fec) { + case 0: + printf("FEC_NONE\n"); + break; + case 1: + printf("FEC_BASE_R\n"); + break; + case 2: + printf("FEC_RS\n"); + break; + case 3: + printf("FEC_BASE_R FEC_RS\n"); + break; + } + + cmd.cmd.id = CGX_CMD_GET_LINK_STS; + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 1); + if (ret) { + printf("Get active fec failed for %s\n", ethdev->name); + return -1; + } + printf("Active FEC type: "); + switch (scr0.s.link_sts.fec) { + case 0: + printf("FEC_NONE\n"); + break; + case 1: + printf("FEC_BASE_R\n"); + break; + case 2: + printf("FEC_RS\n"); + break; + } + return 0; +} + +int cgx_intf_set_fec(struct udevice *ethdev, int type) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_SET_FEC; + cmd.fec_args.fec = type; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 0); + if (ret) { + printf("Set FEC type %d failed for %s\n", type, ethdev->name); + return -1; + } + return 0; +} + +int cgx_intf_get_phy_mod_type(struct udevice *ethdev) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = 
CGX_CMD_GET_PHY_MOD_TYPE; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 1); + if (ret) { + printf("Get PHYMOD type failed for %s\n", ethdev->name); + return -1; + } + printf("Current phy mod type %s\n", + scr0.s.phy_mod_type.mod ? "PAM4" : "NRZ"); + return 0; +} + +int cgx_intf_set_phy_mod_type(struct udevice *ethdev, int type) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_SET_PHY_MOD_TYPE; + cmd.phy_mod_args.mod = type; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 0); + if (ret) { + printf("Set PHYMOD type %d failed for %s\n", type, + ethdev->name); + return -1; + } + + return 0; +} + +int cgx_intf_set_an_lbk(struct udevice *ethdev, int enable) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_AN_LOOPBACK; + cmd.cmd_args.enable = enable; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 0); + if (ret) { + printf("Set AN loopback command failed on %s\n", ethdev->name); + return -1; + } + printf("AN loopback %s for %s\n", enable ? "set" : "clear", + ethdev->name); + + return 0; +} + +int cgx_intf_get_ignore(struct udevice *ethdev, int cgx, int lmac) +{ + struct rvu_pf *rvu; + struct nix *nix; + union cgx_scratchx0 scr0; + int ret, cgx_id = cgx, lmac_id = lmac; + union cgx_cmd_s cmd; + + if (ethdev) { + rvu = dev_get_priv(ethdev); + nix = rvu->nix; + cgx_id = nix->lmac->cgx->cgx_id; + lmac_id = nix->lmac->lmac_id; + } + cmd.cmd.id = CGX_CMD_GET_PERSIST_IGNORE; + + ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 1); + if (ret) { + if (ethdev) + printf("Get ignore command failed for %s\n", + ethdev->name); + else + printf("Get ignore command failed for CGX%d LMAC%d\n", + cgx_id, lmac_id); + return -1; + } + if (ethdev) + printf("Persist settings %signored for %s\n", + scr0.s.persist.ignore ? "" : "not ", ethdev->name); + else + printf("Persist settings %signored for CGX%d LMAC%d\n", + scr0.s.persist.ignore ? 
"" : "not ", cgx_id, lmac_id); + + return 0; +} + +int cgx_intf_set_ignore(struct udevice *ethdev, int cgx, int lmac, int ignore) +{ + struct rvu_pf *rvu; + struct nix *nix; + union cgx_scratchx0 scr0; + int ret, cgx_id = cgx, lmac_id = lmac; + union cgx_cmd_s cmd; + + if (ethdev) { + rvu = dev_get_priv(ethdev); + nix = rvu->nix; + cgx_id = nix->lmac->cgx->cgx_id; + lmac_id = nix->lmac->lmac_id; + } + cmd.cmd.id = CGX_CMD_SET_PERSIST_IGNORE; + cmd.persist_args.ignore = ignore; + + ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 0); + if (ret) { + if (ethdev) + printf("Set ignore command failed for %s\n", + ethdev->name); + else + printf("Set ignore command failed for CGX%d LMAC%d\n", + cgx_id, lmac_id); + return -1; + } + + return 0; +} + +int cgx_intf_set_macaddr(struct udevice *ethdev) +{ + struct rvu_pf *rvu = dev_get_priv(ethdev); + struct nix *nix = rvu->nix; + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + u64 mac, tmp; + + memcpy((void *)&tmp, nix->lmac->mac_addr, 6); + mac = swab64(tmp) >> 16; + cmd.cmd.id = CGX_CMD_SET_MAC_ADDR; + cmd.mac_args.addr = mac; + cmd.mac_args.pf_id = rvu->pfid; + + ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id, + cmd, &scr0.u, 0); + if (ret) { + printf("Set user mac addr failed for %s\n", ethdev->name); + return -1; + } + + return 0; +} + +int cgx_intf_display_eye(u8 qlm, u8 lane) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_DISPLAY_EYE; + + cmd.dsp_eye_args.qlm = qlm; + cmd.dsp_eye_args.lane = lane; + + ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0); + if (ret) + return -1; + + return 0; +} + +int cgx_intf_display_serdes(u8 qlm, u8 lane) +{ + union cgx_scratchx0 scr0; + int ret; + union cgx_cmd_s cmd; + + cmd.cmd.id = CGX_CMD_DISPLAY_SERDES; + + cmd.dsp_eye_args.qlm = qlm; + cmd.dsp_eye_args.lane = lane; + + ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0); + if (ret) + return -1; + + return 0; +} diff --git a/drivers/net/octeontx2/cgx_intf.h b/drivers/net/octeontx2/cgx_intf.h new file mode 100644 index 00000000000..62a7203ad86 --- /dev/null +++ b/drivers/net/octeontx2/cgx_intf.h @@ -0,0 +1,448 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +#ifndef __CGX_INTF_H__ +#define __CGX_INTF_H__ + +#define CGX_FIRMWARE_MAJOR_VER 1 +#define CGX_FIRMWARE_MINOR_VER 0 + +/* Register offsets */ +#define CGX_CMR_INT 0x87e0e0000040 +#define CGX_CMR_SCRATCH0 0x87e0e0001050 +#define CGX_CMR_SCRATCH1 0x87e0e0001058 + +#define CGX_SHIFT(x) (0x1000000 * ((x) & 0x3)) +#define CMR_SHIFT(x) (0x40000 * ((x) & 0x3)) + +/* CGX error types. 
set for cmd response status as CGX_STAT_FAIL */ +enum cgx_error_type { + CGX_ERR_NONE, + CGX_ERR_LMAC_NOT_ENABLED, + CGX_ERR_LMAC_MODE_INVALID, + CGX_ERR_REQUEST_ID_INVALID, + CGX_ERR_PREV_ACK_NOT_CLEAR, + CGX_ERR_PHY_LINK_DOWN, + CGX_ERR_PCS_RESET_FAIL, + CGX_ERR_AN_CPT_FAIL, + CGX_ERR_TX_NOT_IDLE, + CGX_ERR_RX_NOT_IDLE, + CGX_ERR_SPUX_BR_BLKLOCK_FAIL, + CGX_ERR_SPUX_RX_ALIGN_FAIL, + CGX_ERR_SPUX_TX_FAULT, + CGX_ERR_SPUX_RX_FAULT, + CGX_ERR_SPUX_RESET_FAIL, + CGX_ERR_SPUX_AN_RESET_FAIL, + CGX_ERR_SPUX_USX_AN_RESET_FAIL, + CGX_ERR_SMUX_RX_LINK_NOT_OK, + CGX_ERR_PCS_LINK_FAIL, + CGX_ERR_TRAINING_FAIL, + CGX_ERR_RX_EQU_FAIL, + CGX_ERR_SPUX_BER_FAIL, + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, + CGX_ERR_SPUX_MARKER_LOCK_FAIL, + CGX_ERR_SET_FEC_INVALID, + CGX_ERR_SET_FEC_FAIL, + CGX_ERR_MODULE_INVALID, + CGX_ERR_MODULE_NOT_PRESENT, + CGX_ERR_SPEED_CHANGE_INVALID, /* = 28 */ + /* FIXME : add more error types when adding support for new modes */ +}; + +/* LINK speed types */ +enum cgx_link_speed { + CGX_LINK_NONE, + CGX_LINK_10M, + CGX_LINK_100M, + CGX_LINK_1G, + CGX_LINK_2HG, /* 2.5 Gbps */ + CGX_LINK_5G, + CGX_LINK_10G, + CGX_LINK_20G, + CGX_LINK_25G, + CGX_LINK_40G, + CGX_LINK_50G, + CGX_LINK_80G, + CGX_LINK_100G, + CGX_LINK_MAX, +}; + +/* REQUEST ID types. Input to firmware */ +enum cgx_cmd_id { + CGX_CMD_NONE = 0, + CGX_CMD_GET_FW_VER, + CGX_CMD_GET_MAC_ADDR, + CGX_CMD_SET_MTU, + CGX_CMD_GET_LINK_STS, /* optional to user */ + CGX_CMD_LINK_BRING_UP, /* = 5 */ + CGX_CMD_LINK_BRING_DOWN, + CGX_CMD_INTERNAL_LBK, + CGX_CMD_EXTERNAL_LBK, + CGX_CMD_HIGIG, + CGX_CMD_LINK_STAT_CHANGE, /* = 10 */ + CGX_CMD_MODE_CHANGE, /* hot plug support */ + CGX_CMD_INTF_SHUTDOWN, + CGX_CMD_GET_MKEX_SIZE, + CGX_CMD_GET_MKEX_PROFILE, + CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */ + CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */ + CGX_CMD_SET_LINK_MODE, + CGX_CMD_GET_SUPPORTED_FEC, + CGX_CMD_SET_FEC, + CGX_CMD_GET_AN, /* = 20 */ + CGX_CMD_SET_AN, + CGX_CMD_GET_ADV_LINK_MODES, + CGX_CMD_GET_ADV_FEC, + CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */ + CGX_CMD_SET_PHY_MOD_TYPE, /* = 25 */ + CGX_CMD_PRBS, + CGX_CMD_DISPLAY_EYE, + CGX_CMD_GET_PHY_FEC_STATS, + CGX_CMD_DISPLAY_SERDES, + CGX_CMD_AN_LOOPBACK, /* = 30 */ + CGX_CMD_GET_PERSIST_IGNORE, + CGX_CMD_SET_PERSIST_IGNORE, + CGX_CMD_SET_MAC_ADDR, +}; + +/* async event ids */ +enum cgx_evt_id { + CGX_EVT_NONE, + CGX_EVT_LINK_CHANGE, +}; + +/* event types - cause of interrupt */ +enum cgx_evt_type { + CGX_EVT_ASYNC, + CGX_EVT_CMD_RESP +}; + +enum cgx_stat { + CGX_STAT_SUCCESS, + CGX_STAT_FAIL +}; + +enum cgx_cmd_own { + /* default ownership with kernel/uefi/u-boot */ + CGX_OWN_NON_SECURE_SW, + /* set by kernel/uefi/u-boot after posting a new request to ATF */ + CGX_OWN_FIRMWARE, +}; + +/* Supported LINK MODE enums + * Each link mode is a bit mask of these + * enums which are represented as bits + */ +enum cgx_mode_t { + CGX_MODE_SGMII_BIT = 0, + CGX_MODE_1000_BASEX_BIT, + CGX_MODE_QSGMII_BIT, + CGX_MODE_10G_C2C_BIT, + CGX_MODE_10G_C2M_BIT, + CGX_MODE_10G_KR_BIT, + CGX_MODE_20G_C2C_BIT, + CGX_MODE_25G_C2C_BIT, + CGX_MODE_25G_C2M_BIT, + CGX_MODE_25G_2_C2C_BIT, + CGX_MODE_25G_CR_BIT, + CGX_MODE_25G_KR_BIT, + CGX_MODE_40G_C2C_BIT, + CGX_MODE_40G_C2M_BIT, + CGX_MODE_40G_CR4_BIT, + CGX_MODE_40G_KR4_BIT, + CGX_MODE_40GAUI_C2C_BIT, + CGX_MODE_50G_C2C_BIT, + CGX_MODE_50G_C2M_BIT, + CGX_MODE_50G_4_C2C_BIT, + CGX_MODE_50G_CR_BIT, + CGX_MODE_50G_KR_BIT, + CGX_MODE_80GAUI_C2C_BIT, + CGX_MODE_100G_C2C_BIT, + CGX_MODE_100G_C2M_BIT, + 
CGX_MODE_100G_CR4_BIT, + CGX_MODE_100G_KR4_BIT, + CGX_MODE_MAX_BIT /* = 29 */ +}; + +/* scratchx(0) CSR used for ATF->non-secure SW communication. + * This acts as the status register + * Provides details on command ack/status, link status, error details + */ + +/* CAUTION : below structures are placed in order based on the bit positions + * For any updates/new bitfields, corresponding structures need to be updated + */ +struct cgx_evt_sts_s { /* start from bit 0 */ + u64 ack:1; + u64 evt_type:1; /* cgx_evt_type */ + u64 stat:1; /* cgx_stat */ + u64 id:6; /* cgx_evt_id/cgx_cmd_id */ + u64 reserved:55; +}; + +/* all the below structures are in the same memory location of SCRATCHX(0) + * value can be read/written based on command ID + */ + +/* Resp to command IDs with command status as CGX_STAT_FAIL + * Not applicable for commands : + * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE + * check struct cgx_lnk_sts_s comments + */ +struct cgx_err_sts_s { /* start from bit 9 */ + u64 reserved1:9; + u64 type:10; /* cgx_error_type */ + u64 reserved2:45; +}; + +/* Resp to cmd ID as CGX_CMD_GET_FW_VER with cmd status as CGX_STAT_SUCCESS */ +struct cgx_ver_s { /* start from bit 9 */ + u64 reserved1:9; + u64 major_ver:4; + u64 minor_ver:4; + u64 reserved2:47; +}; + +/* Resp to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as CGX_STAT_SUCCESS + * Returns each byte of MAC address in a separate bit field + */ +struct cgx_mac_addr_s { /* start from bit 9 */ + u64 reserved1:9; + u64 addr_0:8; + u64 addr_1:8; + u64 addr_2:8; + u64 addr_3:8; + u64 addr_4:8; + u64 addr_5:8; + u64 reserved2:7; +}; + +/* Resp to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed when + * processing link up/down/change command.
Both err_type and current link status + * will be updated + * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current + * link status will be updated + */ +struct cgx_lnk_sts_s { + u64 reserved1:9; + u64 link_up:1; + u64 full_duplex:1; + u64 speed:4; /* cgx_link_speed */ + u64 err_type:10; + u64 an:1; /* Current AN state : enabled/disabled */ + u64 fec:2; /* Current FEC type if enabled, if not 0 */ + u64 port:8; /* Share the current port info if required */ + u64 mode:8; /* cgx_mode_t enum integer value */ + u64 reserved2:20; +}; + +struct sh_fwd_base_s { + u64 reserved1:9; + u64 addr:55; +}; + +struct cgx_link_modes_s { + u64 reserved1:9; + u64 modes:55; +}; + +/* Resp to cmd ID - CGX_CMD_GET_ADV_FEC/CGX_CMD_GET_SUPPORTED_FEC + * fec : 2 bits + * typedef enum cgx_fec_type { + * CGX_FEC_NONE, + * CGX_FEC_BASE_R, + * CGX_FEC_RS + * } fec_type_t; + */ +struct cgx_fec_types_s { + u64 reserved1:9; + u64 fec:2; + u64 reserved2:53; +}; + +/* Resp to cmd ID - CGX_CMD_GET_AN */ +struct cgx_get_an_s { + u64 reserved1:9; + u64 an:1; + u64 reserved2:54; +}; + +/* Resp to cmd ID - CGX_CMD_GET_PHY_MOD_TYPE */ +struct cgx_get_phy_mod_type_s { + u64 reserved1:9; + u64 mod:1; /* 0=NRZ, 1=PAM4 */ + u64 reserved2:54; +}; + +/* Resp to cmd ID - CGX_CMD_GET_PERSIST_IGNORE */ +struct cgx_get_flash_ignore_s { + uint64_t reserved1:9; + uint64_t ignore:1; + uint64_t reserved2:54; +}; + +union cgx_rsp_sts { + /* Fixed, applicable for all commands/events */ + struct cgx_evt_sts_s evt_sts; + /* response to CGX_CMD_LINK_BRINGUP/DOWN/LINK_CHANGE */ + struct cgx_lnk_sts_s link_sts; + /* response to CGX_CMD_GET_FW_VER */ + struct cgx_ver_s ver; + /* response to CGX_CMD_GET_MAC_ADDR */ + struct cgx_mac_addr_s mac_s; + /* response to CGX_CMD_GET_FWD_BASE */ + struct sh_fwd_base_s fwd_base_s; + /* response if evt_status = CMD_FAIL */ + struct cgx_err_sts_s err; + /* response to CGX_CMD_GET_SUPPORTED_FEC */ + struct cgx_fec_types_s supported_fec; + /* response to CGX_CMD_GET_LINK_MODES */ + struct cgx_link_modes_s supported_modes; + /* response to CGX_CMD_GET_ADV_LINK_MODES */ + struct cgx_link_modes_s adv_modes; + /* response to CGX_CMD_GET_ADV_FEC */ + struct cgx_fec_types_s adv_fec; + /* response to CGX_CMD_GET_AN */ + struct cgx_get_an_s an; + /* response to CGX_CMD_GET_PHY_MOD_TYPE */ + struct cgx_get_phy_mod_type_s phy_mod_type; + /* response to CGX_CMD_GET_PERSIST_IGNORE */ + struct cgx_get_flash_ignore_s persist; +#ifdef NT_FW_CONFIG + /* response to CGX_CMD_GET_MKEX_SIZE */ + struct cgx_mcam_profile_sz_s prfl_sz; + /* response to CGX_CMD_GET_MKEX_PROFILE */ + struct cgx_mcam_profile_addr_s prfl_addr; +#endif +}; + +union cgx_scratchx0 { + u64 u; + union cgx_rsp_sts s; +}; + +/* scratchx(1) CSR used for non-secure SW->ATF communication + * This CSR acts as a command register + */ +struct cgx_cmd { /* start from bit 2 */ + u64 reserved1:2; + u64 id:6; /* cgx_request_id */ + u64 reserved2:56; +}; + +/* all the below structures are in the same memory location of SCRATCHX(1) + * corresponding arguments for command Id needs to be updated + */ + +/* Any command using enable/disable as an argument need + * to pass the option via this structure. + * Ex: Loopback, HiGig... 
+ */ +struct cgx_ctl_args { /* start from bit 8 */ + u64 reserved1:8; + u64 enable:1; + u64 reserved2:55; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */ +struct cgx_mtu_args { + u64 reserved1:8; + u64 size:16; + u64 reserved2:40; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */ +struct cgx_mode_change_args { + uint64_t reserved1:8; + uint64_t speed:4; /* cgx_link_speed enum */ + uint64_t duplex:1; /* 0 - full duplex, 1 - half duplex */ + uint64_t an:1; /* 0 - disable AN, 1 - enable AN */ + uint64_t port:8; /* device port */ + uint64_t mode:42; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */ +struct cgx_link_change_args { /* start from bit 8 */ + u64 reserved1:8; + u64 link_up:1; + u64 full_duplex:1; + u64 speed:4; /* cgx_link_speed */ + u64 reserved2:50; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_LINK_MODE */ +struct cgx_set_mode_args { + u64 reserved1:8; + u64 mode:56; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_FEC */ +struct cgx_set_fec_args { + u64 reserved1:8; + u64 fec:2; + u64 reserved2:54; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */ +struct cgx_set_phy_mod_args { + u64 reserved1:8; + u64 mod:1; /* 0=NRZ, 1=PAM4 */ + u64 reserved2:55; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_PERSIST_IGNORE */ +struct cgx_set_flash_ignore_args { + uint64_t reserved1:8; + uint64_t ignore:1; + uint64_t reserved2:55; +}; + +/* command argument to be passed for cmd ID - CGX_CMD_SET_MAC_ADDR */ +struct cgx_mac_addr_args { + uint64_t reserved1:8; + uint64_t addr:48; + uint64_t pf_id:8; +}; + +struct cgx_prbs_args { + u64 reserved1:8; /* start from bit 8 */ + u64 lane:8; + u64 qlm:8; + u64 stop_on_error:1; + u64 mode:8; + u64 time:31; +}; + +struct cgx_display_eye_args { + u64 reserved1:8; /* start from bit 8 */ + u64 qlm:8; + u64 lane:47; +}; + +union cgx_cmd_s { + u64 own_status:2; /* cgx_cmd_own */ + struct cgx_cmd cmd; + struct cgx_ctl_args cmd_args; + struct cgx_mtu_args mtu_size; + struct cgx_link_change_args lnk_args; /* Input to CGX_CMD_LINK_CHANGE */ + struct cgx_set_mode_args mode_args; + struct cgx_mode_change_args mode_change_args; + struct cgx_set_fec_args fec_args; + struct cgx_set_phy_mod_args phy_mod_args; + struct cgx_set_flash_ignore_args persist_args; + struct cgx_mac_addr_args mac_args; + /* any other arg for command id * like : mtu, dmac filtering control */ + struct cgx_prbs_args prbs_args; + struct cgx_display_eye_args dsp_eye_args; +}; + +union cgx_scratchx1 { + u64 u; + union cgx_cmd_s s; +}; + +#endif /* __CGX_INTF_H__ */ diff --git a/drivers/net/octeontx2/lmt.h b/drivers/net/octeontx2/lmt.h new file mode 100644 index 00000000000..84a7eab8140 --- /dev/null +++ b/drivers/net/octeontx2/lmt.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. + */ + +/** + * Atomically adds a signed value to a 64 bit (aligned) memory location, + * and returns previous value. + * + * This version does not perform 'sync' operations to enforce memory + * operations. This should only be used when there are no memory operation + * ordering constraints. (This should NOT be used for reference counting - + * use the standard version instead.) 
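 + * On OcteonTX2 this compiles to a single LDADD instruction from the ARMv8.1 LSE atomics extension, hence no explicit barriers here.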
+ * + * @param ptr address in memory to add incr to + * @param incr amount to increment memory location by (signed) + * + * @return Value of memory location before increment + */ +static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr) +{ + s64 result; + /* Atomic add with no ordering */ + asm volatile("ldadd %x[i], %x[r], [%[b]]" + : [r] "=r" (result), "+m" (*ptr) + : [i] "r" (incr), [b] "r" (ptr) + : "memory"); + return result; +} + +static inline void lmt_cancel(const struct nix *nix) +{ + writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL()); +} + +static inline u64 *lmt_store_ptr(struct nix *nix) +{ + return (u64 *)((u8 *)(nix->lmt_base) + + LMT_LF_LMTLINEX(0)); +} + +static inline s64 lmt_submit(u64 io_address) +{ + s64 result = 0; + + asm volatile("ldeor xzr, %x[rf],[%[rs]]" + : [rf] "=r"(result) : [rs] "r"(io_address)); + return result; +} diff --git a/drivers/net/octeontx2/nix.c b/drivers/net/octeontx2/nix.c new file mode 100644 index 00000000000..0a3e8e4af0b --- /dev/null +++ b/drivers/net/octeontx2/nix.c @@ -0,0 +1,831 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + */ + +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <malloc.h> +#include <memalign.h> +#include <misc.h> +#include <net.h> +#include <pci.h> +#include <watchdog.h> + +#include <asm/arch/board.h> +#include <asm/arch/csrs/csrs-lmt.h> +#include <asm/io.h> +#include <asm/types.h> + +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/types.h> + +#include "nix.h" +#include "lmt.h" +#include "cgx.h" + +/** + * NIX needs a lot of memory areas. Rather than handle all the failure cases, + * we'll use a wrapper around alloc that prints an error if a memory + * allocation fails. + * + * @param num_elements + * Number of elements to allocate + * @param elem_size Size of each element + * @param msg Text string to show when allocation fails + * + * @return A valid memory location or NULL on failure + */ +static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg) +{ + size_t alloc_size = num_elements * elem_size; + void *base = memalign(CONFIG_SYS_CACHELINE_SIZE, alloc_size); + + if (!base) + printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n", + msg ? msg : __func__, num_elements, elem_size, + alloc_size); + else + memset(base, 0, alloc_size); + + debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n", + msg ? msg : __func__, num_elements, elem_size, alloc_size, base); + return base; +} + +int npc_lf_setup(struct nix *nix) +{ + int err; + + err = npc_lf_admin_setup(nix); + if (err) { + printf("%s: Error setting up npc lf admin\n", __func__); + return err; + } + + return 0; +} + +static int npa_setup_pool(struct npa *npa, u32 pool_id, + size_t buffer_size, u32 queue_length, void *buffers[]) +{ + struct { + union npa_lf_aura_op_free0 f0; + union npa_lf_aura_op_free1 f1; + } aura_descr; + int index; + + for (index = 0; index < queue_length; index++) { + buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE, + buffer_size); + if (!buffers[index]) { + printf("%s: Out of memory %d, size: %zu\n", + __func__, index, buffer_size); + return -ENOMEM; + } + debug("%s: allocating buffer %d, addr %p size: %zu\n", + __func__, index, buffers[index], buffer_size); + + /* Add the newly obtained pointer to the pool. 128 bit + * writes only. 
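 + * The NPA_LF_AURA_OP_FREE0/FREE1 register pair must be updated in one atomic access, which is why both words go out in a single 128-bit st128() store below.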
+ */ + aura_descr.f0.s.addr = (u64)buffers[index]; + aura_descr.f1.u = 0; + aura_descr.f1.s.aura = pool_id; + st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(), + aura_descr.f0.u, aura_descr.f1.u); + } + + return 0; +} + +int npa_lf_setup(struct nix *nix) +{ + struct rvu_pf *rvu = dev_get_priv(nix->dev); + struct nix_af *nix_af = nix->nix_af; + struct npa *npa; + union npa_af_const npa_af_const; + union npa_aura_s *aura; + union npa_pool_s *pool; + union rvu_func_addr_s block_addr; + int idx; + int stack_page_pointers; + int stack_page_bytes; + int err; + + npa = (struct npa *)calloc(1, sizeof(struct npa)); + if (!npa) { + printf("%s: out of memory for npa instance\n", __func__); + return -ENOMEM; + } + block_addr.u = 0; + block_addr.s.block = RVU_BLOCK_ADDR_E_NPA; + npa->npa_base = rvu->pf_base + block_addr.u; + npa->npa_af = nix_af->npa_af; + nix->npa = npa; + + npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST()); + stack_page_pointers = npa_af_const.s.stack_page_ptrs; + stack_page_bytes = npa_af_const.s.stack_page_bytes; + + npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) / + stack_page_pointers; + npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) / + stack_page_pointers; + npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) / + stack_page_pointers; + npa->pool_stack_pointers = stack_page_pointers; + + npa->q_len[NPA_POOL_RX] = RQ_QLEN; + npa->q_len[NPA_POOL_TX] = SQ_QLEN; + npa->q_len[NPA_POOL_SQB] = SQB_QLEN; + + npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE; + npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE; + npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size; + + npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT, + sizeof(union npa_aura_s), + "aura context"); + if (!npa->aura_ctx) { + printf("%s: Out of memory for aura context\n", __func__); + return -ENOMEM; + } + + for (idx = 0; idx < NPA_POOL_COUNT; idx++) { + npa->pool_ctx[idx] = nix_memalloc(1, + sizeof(union npa_pool_s), + "pool context"); + if (!npa->pool_ctx[idx]) { + printf("%s: Out of memory for pool context\n", + __func__); + return -ENOMEM; + } + npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx], + stack_page_bytes, + "pool stack"); + if (!npa->pool_stack[idx]) { + printf("%s: Out of memory for pool stack\n", __func__); + return -ENOMEM; + } + } + + err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx); + if (err) { + printf("%s: Error setting up NPA LF admin for lf %d\n", + __func__, nix->lf); + return err; + } + + /* Set up the auras */ + for (idx = 0; idx < NPA_POOL_COUNT; idx++) { + aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s)); + pool = npa->pool_ctx[idx]; + debug("%s aura %p pool %p\n", __func__, aura, pool); + memset(aura, 0, sizeof(union npa_aura_s)); + aura->s.fc_ena = 0; + aura->s.pool_addr = (u64)npa->pool_ctx[idx]; + debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__, + aura->s.pool_addr, npa->pool_ctx[idx]); + aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8; + aura->s.count = npa->q_len[idx]; + aura->s.limit = npa->q_len[idx]; + aura->s.ena = 1; + err = npa_attach_aura(nix_af, nix->lf, aura, idx); + if (err) + return err; + + memset(pool, 0, sizeof(*pool)); + pool->s.fc_ena = 0; + pool->s.nat_align = 1; + pool->s.stack_base = (u64)(npa->pool_stack[idx]); + debug("%s pool.s.stack_base %llx stack_base %p\n", __func__, + pool->s.stack_base, npa->pool_stack[idx]); + pool->s.buf_size = + npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE; + pool->s.stack_max_pages = 
npa->stack_pages[idx]; + pool->s.shift = + 64 - __builtin_clzll(npa->pool_stack_pointers) - 8; + pool->s.ptr_start = 0; + pool->s.ptr_end = (1ULL << 40) - 1; + pool->s.ena = 1; + err = npa_attach_pool(nix_af, nix->lf, pool, idx); + if (err) + return err; + } + + for (idx = 0; idx < NPA_POOL_COUNT; idx++) { + npa->buffers[idx] = nix_memalloc(npa->q_len[idx], + sizeof(void *), + "buffers"); + if (!npa->buffers[idx]) { + printf("%s: Out of memory\n", __func__); + return -ENOMEM; + } + } + + for (idx = 0; idx < NPA_POOL_COUNT; idx++) { + err = npa_setup_pool(npa, idx, npa->buf_size[idx], + npa->q_len[idx], npa->buffers[idx]); + if (err) { + printf("%s: Error setting up pool %d\n", + __func__, idx); + return err; + } + } + return 0; +} + +int npa_lf_shutdown(struct nix *nix) +{ + struct npa *npa = nix->npa; + int err; + int pool; + + err = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT); + if (err) { + printf("%s: Error %d shutting down NPA LF admin\n", + __func__, err); + return err; + } + free(npa->aura_ctx); + npa->aura_ctx = NULL; + + for (pool = 0; pool < NPA_POOL_COUNT; pool++) { + free(npa->pool_ctx[pool]); + npa->pool_ctx[pool] = NULL; + free(npa->pool_stack[pool]); + npa->pool_stack[pool] = NULL; + free(npa->buffers[pool]); + npa->buffers[pool] = NULL; + } + + return 0; +} + +int nix_lf_setup(struct nix *nix) +{ + struct nix_af *nix_af = nix->nix_af; + int idx; + int err = -1; + + /* Alloc NIX RQ HW context memory */ + nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz, + "RQ CTX"); + if (!nix->rq_ctx_base) + goto error; + memset(nix->rq_ctx_base, 0, nix_af->rq_ctx_sz); + + /* Alloc NIX SQ HW context memory */ + nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz, + "SQ CTX"); + if (!nix->sq_ctx_base) + goto error; + memset(nix->sq_ctx_base, 0, nix_af->sq_ctx_sz); + + /* Alloc NIX CQ HW context memory */ + nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz, + "CQ CTX"); + if (!nix->cq_ctx_base) + goto error; + memset(nix->cq_ctx_base, 0, nix_af->cq_ctx_sz * NIX_CQ_COUNT); + /* Alloc NIX CQ Ring memory */ + for (idx = 0; idx < NIX_CQ_COUNT; idx++) { + err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE); + if (err) + goto error; + } + + /* Alloc memory for Qints HW contexts */ + nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz, + "Qint CTX"); + if (!nix->qint_base) + goto error; + /* Alloc memory for CQints HW contexts */ + nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz, + "Cint CTX"); + if (!nix->cint_base) + goto error; + /* Alloc NIX RSS HW context memory and config the base */ + nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz, + "RSS CTX"); + if (!nix->rss_base) + goto error; + + err = nix_lf_admin_setup(nix); + if (err) { + printf("%s: Error setting up LF\n", __func__); + goto error; + } + + return 0; + +error: + if (nix->rq_ctx_base) + free(nix->rq_ctx_base); + nix->rq_ctx_base = NULL; + if (nix->sq_ctx_base) + free(nix->sq_ctx_base); + nix->sq_ctx_base = NULL; + if (nix->cq_ctx_base) + free(nix->cq_ctx_base); + nix->cq_ctx_base = NULL; + + for (idx = 0; idx < NIX_CQ_COUNT; idx++) + qmem_free(&nix->cq[idx]); + + return err; +} + +int nix_lf_shutdown(struct nix *nix) +{ + struct nix_af *nix_af = nix->nix_af; + int index; + int err; + + err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt, + nix->rq_cnt, nix->sq_cnt); + if (err) { + printf("%s: Error shutting down LF admin\n", __func__); + return err;
+ } + + if (nix->rq_ctx_base) + free(nix->rq_ctx_base); + nix->rq_ctx_base = NULL; + if (nix->sq_ctx_base) + free(nix->sq_ctx_base); + nix->sq_ctx_base = NULL; + if (nix->cq_ctx_base) + free(nix->cq_ctx_base); + nix->cq_ctx_base = NULL; + + for (index = 0; index < NIX_CQ_COUNT; index++) + qmem_free(&nix->cq[index]); + + debug("%s: nix lf %d reset --\n", __func__, nix->lf); + return 0; +} + +struct nix *nix_lf_alloc(struct udevice *dev) +{ + union rvu_func_addr_s block_addr; + struct nix *nix; + struct rvu_pf *rvu = dev_get_priv(dev); + struct rvu_af *rvu_af = dev_get_priv(rvu->afdev); + union rvu_pf_func_s pf_func; + int err; + + debug("%s(%s )\n", __func__, dev->name); + + nix = (struct nix *)calloc(1, sizeof(*nix)); + if (!nix) { + printf("%s: Out of memory for nix instance\n", __func__); + return NULL; + } + nix->nix_af = rvu_af->nix_af; + + block_addr.u = 0; + block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0); + nix->nix_base = rvu->pf_base + block_addr.u; + block_addr.u = 0; + block_addr.s.block = RVU_BLOCK_ADDR_E_NPC; + nix->npc_base = rvu->pf_base + block_addr.u; + block_addr.u = 0; + block_addr.s.block = RVU_BLOCK_ADDR_E_LMT; + nix->lmt_base = rvu->pf_base + block_addr.u; + + pf_func.u = 0; + pf_func.s.pf = rvu->pfid; + nix->pf_func = pf_func.u; + nix->lf = rvu->nix_lfid; + nix->pf = rvu->pfid; + nix->dev = dev; + nix->sq_cnt = 1; + nix->rq_cnt = 1; + nix->rss_grps = 1; + nix->cq_cnt = 2; + nix->xqe_sz = NIX_CQE_SIZE_W16; + + nix->lmac = nix_get_cgx_lmac(nix->pf); + if (!nix->lmac) { + printf("%s: Error: could not find lmac for pf %d\n", + __func__, nix->pf); + free(nix); + return NULL; + } + nix->lmac->link_num = + NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id, + nix->lmac->lmac_id); + nix->lmac->chan_num = + NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id, + nix->lmac->lmac_id, 0); + /* This is rx pkind in 1:1 mapping to NIX_LINK_E */ + nix->lmac->pknd = nix->lmac->link_num; + + cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd); + debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name, + nix->lmac->cgx->cgx_id, nix->lmac->lmac_id); + debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name, + nix->lmac->link_num, nix->lmac->chan_num, nix->lmac->pknd); + + err = npa_lf_setup(nix); + if (err) + return NULL; + + err = npc_lf_setup(nix); + if (err) + return NULL; + + err = nix_lf_setup(nix); + if (err) + return NULL; + + return nix; +} + +u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id) +{ + union npa_lf_aura_op_allocx op_allocx; + + op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base + + NPA_LF_AURA_OP_ALLOCX(0), aura_id); + return op_allocx.s.addr; +} + +u64 nix_cq_op_status(struct nix *nix, u64 cq_id) +{ + union nixx_lf_cq_op_status op_status; + s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS(); + + op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32); + return op_status.u; +} + +/* TX */ +static inline void nix_write_lmt(struct nix *nix, void *buffer, + int num_words) +{ + int i; + + u64 *lmt_ptr = lmt_store_ptr(nix); + u64 *ptr = buffer; + + debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, lmt_ptr); + for (i = 0; i < num_words; i++) { + debug("%s data %llx lmt_ptr %p\n", __func__, ptr[i], + lmt_ptr + i); + lmt_ptr[i] = ptr[i]; + } +} + +void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe) +{ + union nix_cqe_hdr_s *txcqe = (union nix_cqe_hdr_s *)cqe; + + debug("%s: txcqe: %p\n", __func__, txcqe); + + if (txcqe->s.cqe_type != NIX_XQE_TYPE_E_SEND) { + printf("%s: 
Error: Unsupported CQ header type %d\n", + __func__, txcqe->s.cqe_type); + return; + } + nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(), + (NIX_CQ_TX << 32) | 1); +} + +void nix_lf_flush_tx(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + union nixx_lf_cq_op_status op_status; + u32 head, tail; + void *cq_tx_base = nix->cq[NIX_CQ_TX].base; + union nix_cqe_hdr_s *cqe; + + /* ack tx cqe entries */ + op_status.u = nix_cq_op_status(nix, NIX_CQ_TX); + head = op_status.s.head; + tail = op_status.s.tail; + head &= (nix->cq[NIX_CQ_TX].qsize - 1); + tail &= (nix->cq[NIX_CQ_TX].qsize - 1); + + debug("%s cq tx head %d tail %d\n", __func__, head, tail); + while (head != tail) { + cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz; + nix_cqe_tx_pkt_handler(nix, cqe); + op_status.u = nix_cq_op_status(nix, NIX_CQ_TX); + head = op_status.s.head; + tail = op_status.s.tail; + head &= (nix->cq[NIX_CQ_TX].qsize - 1); + tail &= (nix->cq[NIX_CQ_TX].qsize - 1); + debug("%s cq tx head %d tail %d\n", __func__, head, tail); + } +} + +int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + struct nix_tx_dr tx_dr; + int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1; + s64 result; + void *packet; + + nix_lf_flush_tx(dev); + memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr)); + /* Dump TX packet in to NPA buffer */ + packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX); + if (!packet) { + printf("%s TX buffers unavailable\n", __func__); + return -1; + } + memcpy(packet, pkt, pkt_len); + debug("%s TX buffer %p\n", __func__, packet); + + tx_dr.hdr.s.aura = NPA_POOL_TX; + tx_dr.hdr.s.df = 0; + tx_dr.hdr.s.pnc = 1; + tx_dr.hdr.s.sq = 0; + tx_dr.hdr.s.total = pkt_len; + tx_dr.hdr.s.sizem1 = dr_sz - 2; /* FIXME - for now hdr+sg+sg1addr */ + debug("%s dr_sz %d\n", __func__, dr_sz); + + tx_dr.tx_sg.s.segs = 1; + tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG; + tx_dr.tx_sg.s.seg1_size = pkt_len; + tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT; + tx_dr.sg1_addr = (dma_addr_t)packet; + +#define DEBUG_PKT +#ifdef DEBUG_PKT + debug("TX PKT Data\n"); + for (int i = 0; i < pkt_len; i++) { + if (i && (i % 8 == 0)) + debug("\n"); + debug("%02x ", *((u8 *)pkt + i)); + } + debug("\n"); +#endif + do { + nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2); + __iowmb(); + result = lmt_submit((u64)(nix->nix_base + + NIXX_LF_OP_SENDX(0))); + WATCHDOG_RESET(); + } while (result == 0); + + return 0; +} + +/* RX */ +void nix_lf_flush_rx(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + union nixx_lf_cq_op_status op_status; + void *cq_rx_base = nix->cq[NIX_CQ_RX].base; + struct nix_rx_dr *rx_dr; + union nix_rx_parse_s *rxparse; + u32 head, tail; + u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz; + u64 *seg; + + /* flush rx cqe entries */ + op_status.u = nix_cq_op_status(nix, NIX_CQ_RX); + head = op_status.s.head; + tail = op_status.s.tail; + head &= (nix->cq[NIX_CQ_RX].qsize - 1); + tail &= (nix->cq[NIX_CQ_RX].qsize - 1); + + debug("%s cq rx head %d tail %d\n", __func__, head, tail); + while (head != tail) { + rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz); + rxparse = &rx_dr->rx_parse; + + debug("%s: rx parse: %p\n", __func__, rxparse); + debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n", + __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1); + + seg = (dma_addr_t *)(&rx_dr->rx_sg + 1); + + st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), + seg[0], (1ULL << 63) | 
NPA_POOL_RX); + + debug("%s return %llx to NPA\n", __func__, seg[0]); + nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(), + (NIX_CQ_RX << 32) | 1); + + op_status.u = nix_cq_op_status(nix, NIX_CQ_RX); + head = op_status.s.head; + tail = op_status.s.tail; + head &= (nix->cq[NIX_CQ_RX].qsize - 1); + tail &= (nix->cq[NIX_CQ_RX].qsize - 1); + debug("%s cq rx head %d tail %d\n", __func__, head, tail); + } +} + +int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + + /* Return rx packet to NPA */ + debug("%s return %p to NPA\n", __func__, pkt); + st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt, + (1ULL << 63) | NPA_POOL_RX); + nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(), + (NIX_CQ_RX << 32) | 1); + + nix_lf_flush_tx(dev); + return 0; +} + +int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + union nixx_lf_cq_op_status op_status; + void *cq_rx_base = nix->cq[NIX_CQ_RX].base; + struct nix_rx_dr *rx_dr; + union nix_rx_parse_s *rxparse; + void *pkt, *cqe; + int pkt_len = 0; + u64 *addr; + u32 head, tail; + + /* fetch rx cqe entries */ + op_status.u = nix_cq_op_status(nix, NIX_CQ_RX); + head = op_status.s.head; + tail = op_status.s.tail; + head &= (nix->cq[NIX_CQ_RX].qsize - 1); + tail &= (nix->cq[NIX_CQ_RX].qsize - 1); + debug("%s cq rx head %d tail %d\n", __func__, head, tail); + if (head == tail) + return -EAGAIN; + + debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head, + nix->cq[NIX_CQ_RX].entry_sz); + cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz; + rx_dr = (struct nix_rx_dr *)cqe; + rxparse = &rx_dr->rx_parse; + + debug("%s: rx completion: %p\n", __func__, cqe); + debug("%s: rx dr: %p\n", __func__, rx_dr); + debug("%s: rx parse: %p\n", __func__, rxparse); + debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n", + __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1); + debug("%s: rx parse: pkind %x chan %x\n", + __func__, rxparse->s.pkind, rxparse->s.chan); + + if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) { + printf("%s: Error: Unsupported CQ header type in Rx %d\n", + __func__, rx_dr->hdr.s.cqe_type); + return -1; + } + + pkt_len = rxparse->s.pkt_lenm1 + 1; + addr = (dma_addr_t *)(&rx_dr->rx_sg + 1); + pkt = (void *)addr[0]; + + debug("%s: segs: %d (%d@0x%llx, %d@0x%llx, %d@0x%llx)\n", __func__, + rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0], + rx_dr->rx_sg.s.seg2_size, addr[1], + rx_dr->rx_sg.s.seg3_size, addr[2]); + if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size + + rx_dr->rx_sg.s.seg3_size) { + debug("%s: Error: rx buffer size too small\n", __func__); + return -1; + } + + __iowmb(); +#define DEBUG_PKT +#ifdef DEBUG_PKT + debug("RX PKT Data\n"); + for (int i = 0; i < pkt_len; i++) { + if (i && (i % 8 == 0)) + debug("\n"); + debug("%02x ", *((u8 *)pkt + i)); + } + debug("\n"); +#endif + + *packetp = (uchar *)pkt; + + return pkt_len; +} + +int nix_lf_setup_mac(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + struct eth_pdata *pdata = dev_get_platdata(dev); + + /* If lower level firmware fails to set proper MAC + * u-boot framework updates MAC to random address. + * Use this hook to update mac address in cgx lmac + * and call mac filter setup to update new address. 
+ */ + if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) { + memcpy(nix->lmac->mac_addr, pdata->enetaddr, 6); + eth_env_set_enetaddr_by_index("eth", rvu->dev->seq, + pdata->enetaddr); + cgx_lmac_mac_filter_setup(nix->lmac); + /* Update user given MAC address to ATF for update + * in sh_fwdata to use in Linux. + */ + cgx_intf_set_macaddr(dev); + debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr); + debug("%s: pMAC %pM\n", __func__, pdata->enetaddr); + } + debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr); + return 0; +} + +void nix_lf_halt(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + + cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false); + + mdelay(1); + + /* Flush tx and rx descriptors */ + nix_lf_flush_rx(dev); + nix_lf_flush_tx(dev); +} + +int nix_lf_init(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + struct lmac *lmac = nix->lmac; + int ret; + u64 link_sts; + u8 link, speed; + u16 errcode; + + printf("Waiting for CGX%d LMAC%d [%s] link status...", + lmac->cgx->cgx_id, lmac->lmac_id, + lmac_type_to_str[lmac->lmac_type]); + + if (lmac->init_pend) { + /* Bring up LMAC */ + ret = cgx_lmac_link_enable(lmac, lmac->lmac_id, + true, &link_sts); + lmac->init_pend = 0; + } else { + ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts); + } + + if (ret) { + printf(" [Down]\n"); + return -1; + } + + link = link_sts & 0x1; + speed = (link_sts >> 2) & 0xf; + errcode = (link_sts >> 6) & 0x2ff; + debug("%s: link %x speed %x errcode %x\n", + __func__, link, speed, errcode); + + /* Print link status */ + printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down"); + if (!link) + return -1; + + if (!lmac->init_pend) + cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true); + + return 0; +} + +void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + struct lmac *lmac = nix->lmac; + + *cgxid = lmac->cgx->cgx_id; + *lmacid = lmac->lmac_id; +} + +void nix_print_mac_info(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + struct nix *nix = rvu->nix; + struct lmac *lmac = nix->lmac; + + printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id, + lmac_type_to_str[lmac->lmac_type]); +} + diff --git a/drivers/net/octeontx2/nix.h b/drivers/net/octeontx2/nix.h new file mode 100644 index 00000000000..03260dddb32 --- /dev/null +++ b/drivers/net/octeontx2/nix.h @@ -0,0 +1,353 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Marvell International Ltd. 
+ */ + +#ifndef __NIX_H__ +#define __NIX_H__ + +#include <asm/arch/csrs/csrs-npa.h> +#include <asm/arch/csrs/csrs-nix.h> +#include "rvu.h" + +/** Maximum number of LMACs supported */ +#define MAX_LMAC 12 + +/* NIX RX action operation*/ +#define NIX_RX_ACTIONOP_DROP (0x0ull) +#define NIX_RX_ACTIONOP_UCAST (0x1ull) +#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull) +#define NIX_RX_ACTIONOP_MCAST (0x3ull) +#define NIX_RX_ACTIONOP_RSS (0x4ull) + +/* NIX TX action operation*/ +#define NIX_TX_ACTIONOP_DROP (0x0ull) +#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull) +#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull) +#define NIX_TX_ACTIONOP_MCAST (0x3ull) +#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull) + +#define NIX_INTF_RX 0 +#define NIX_INTF_TX 1 + +#define NIX_INTF_TYPE_CGX 0 +#define NIX_INTF_TYPE_LBK 1 +#define NIX_MAX_HW_MTU 9212 +#define NIX_MIN_HW_MTU 40 +#define MAX_MTU 1536 + +#define NPA_POOL_COUNT 3 +#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6)) +#define NPA_POOL_RX 0ULL +#define NPA_POOL_TX 1ULL +#define NPA_POOL_SQB 2ULL +#define RQ_QLEN Q_COUNT(Q_SIZE_1K) +#define SQ_QLEN Q_COUNT(Q_SIZE_1K) +#define SQB_QLEN Q_COUNT(Q_SIZE_16) + +#define NIX_CQ_RX 0ULL +#define NIX_CQ_TX 1ULL +#define NIX_CQ_COUNT 2ULL +#define NIX_CQE_SIZE_W16 (16 * sizeof(u64)) +#define NIX_CQE_SIZE_W64 (64 * sizeof(u64)) + +/** Size of aura hardware context */ +#define NPA_AURA_HW_CTX_SIZE 48 +/** Size of pool hardware context */ +#define NPA_POOL_HW_CTX_SIZE 64 + +#define NPA_DEFAULT_PF_FUNC 0xffff + +#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) +#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) +#define NIX_LINK_LBK(a) (12 + (a)) +#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) +#define MAX_LMAC_PKIND 12 + +/** Number of Admin queue entries */ +#define AQ_RING_SIZE Q_COUNT(Q_SIZE_16) + +/** Each completion queue contains 256 entries, see NIC_CQ_CTX_S[qsize] */ +#define CQS_QSIZE Q_SIZE_256 +#define CQ_ENTRIES Q_COUNT(CQS_QSIZE) +/** + * Each completion queue entry contains 128 bytes, see + * NIXX_AF_LFX_CFG[xqe_size] + */ +#define CQ_ENTRY_SIZE NIX_CQE_SIZE_W16 + +enum npa_aura_size { + NPA_AURA_SZ_0, + NPA_AURA_SZ_128, + NPA_AURA_SZ_256, + NPA_AURA_SZ_512, + NPA_AURA_SZ_1K, + NPA_AURA_SZ_2K, + NPA_AURA_SZ_4K, + NPA_AURA_SZ_8K, + NPA_AURA_SZ_16K, + NPA_AURA_SZ_32K, + NPA_AURA_SZ_64K, + NPA_AURA_SZ_128K, + NPA_AURA_SZ_256K, + NPA_AURA_SZ_512K, + NPA_AURA_SZ_1M, + NPA_AURA_SZ_MAX, +}; + +#define NPA_AURA_SIZE_DEFAULT NPA_AURA_SZ_128 + +/* NIX Transmit schedulers */ +enum nix_scheduler { + NIX_TXSCH_LVL_SMQ = 0x0, + NIX_TXSCH_LVL_MDQ = 0x0, + NIX_TXSCH_LVL_TL4 = 0x1, + NIX_TXSCH_LVL_TL3 = 0x2, + NIX_TXSCH_LVL_TL2 = 0x3, + NIX_TXSCH_LVL_TL1 = 0x4, + NIX_TXSCH_LVL_CNT = 0x5, +}; + +struct cgx; + +struct nix_stats { + u64 num_packets; + u64 num_bytes; +}; + +struct nix; +struct lmac; + +struct npa_af { + void __iomem *npa_af_base; + struct admin_queue aq; + u32 aura; +}; + +struct npa { + struct npa_af *npa_af; + void __iomem *npa_base; + void __iomem *npc_base; + void __iomem *lmt_base; + /** Hardware aura context */ + void *aura_ctx; + /** Hardware pool context */ + void *pool_ctx[NPA_POOL_COUNT]; + void *pool_stack[NPA_POOL_COUNT]; + void **buffers[NPA_POOL_COUNT]; + u32 pool_stack_pages[NPA_POOL_COUNT]; + u32 pool_stack_pointers; + u32 q_len[NPA_POOL_COUNT]; + u32 buf_size[NPA_POOL_COUNT]; + u32 stack_pages[NPA_POOL_COUNT]; +}; + +struct nix_af { + struct udevice *dev; + struct nix *lmacs[MAX_LMAC]; + struct npa_af *npa_af; + void __iomem *nix_af_base; + void __iomem *npc_af_base; + 
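+ /** Admin queue shared by the AF; nix_aq_issue_command() posts RQ/SQ/CQ context init and write instructions here */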
struct admin_queue aq; + u8 num_lmacs; + s8 index; + u8 xqe_size; + u32 sqb_size; + u32 qints; + u32 cints; + u32 sq_ctx_sz; + u32 rq_ctx_sz; + u32 cq_ctx_sz; + u32 rsse_ctx_sz; + u32 cint_ctx_sz; + u32 qint_ctx_sz; +}; + +struct nix_tx_dr { + union nix_send_hdr_s hdr; + union nix_send_sg_s tx_sg; + dma_addr_t sg1_addr; + dma_addr_t sg2_addr; + dma_addr_t sg3_addr; + u64 in_use; +}; + +struct nix_rx_dr { + union nix_cqe_hdr_s hdr; + union nix_rx_parse_s rx_parse; + union nix_rx_sg_s rx_sg; +}; + +struct nix { + struct udevice *dev; + struct eth_device *netdev; + struct nix_af *nix_af; + struct npa *npa; + struct lmac *lmac; + union nix_cint_hw_s *cint_base; + union nix_cq_ctx_s *cq_ctx_base; + union nix_qint_hw_s *qint_base; + union nix_rq_ctx_s *rq_ctx_base; + union nix_rsse_s *rss_base; + union nix_sq_ctx_s *sq_ctx_base; + void *cqe_base; + struct qmem sq; + struct qmem cq[NIX_CQ_COUNT]; + struct qmem rq; + struct qmem rss; + struct qmem cq_ints; + struct qmem qints; + char name[16]; + void __iomem *nix_base; /** PF reg base */ + void __iomem *npc_base; + void __iomem *lmt_base; + struct nix_stats tx_stats; + struct nix_stats rx_stats; + u32 aura; + int pknd; + int lf; + int pf; + u16 pf_func; + u32 rq_cnt; /** receive queues count */ + u32 sq_cnt; /** send queues count */ + u32 cq_cnt; /** completion queues count */ + u16 rss_sz; + u16 sqb_size; + u8 rss_grps; + u8 xqe_sz; +}; + +struct nix_aq_cq_dis { + union nix_aq_res_s resp ALIGNED; + union nix_cq_ctx_s cq ALIGNED; + union nix_cq_ctx_s mcq ALIGNED; +}; + +struct nix_aq_rq_dis { + union nix_aq_res_s resp ALIGNED; + union nix_rq_ctx_s rq ALIGNED; + union nix_rq_ctx_s mrq ALIGNED; +}; + +struct nix_aq_sq_dis { + union nix_aq_res_s resp ALIGNED; + union nix_sq_ctx_s sq ALIGNED; + union nix_sq_ctx_s msq ALIGNED; +}; + +struct nix_aq_cq_request { + union nix_aq_res_s resp ALIGNED; + union nix_cq_ctx_s cq ALIGNED; +}; + +struct nix_aq_rq_request { + union nix_aq_res_s resp ALIGNED; + union nix_rq_ctx_s rq ALIGNED; +}; + +struct nix_aq_sq_request { + union nix_aq_res_s resp ALIGNED; + union nix_sq_ctx_s sq ALIGNED; +}; + +static inline u64 nix_af_reg_read(struct nix_af *nix_af, u64 offset) +{ + u64 val = readq(nix_af->nix_af_base + offset); + + debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset, + val); + return val; +} + +static inline void nix_af_reg_write(struct nix_af *nix_af, u64 offset, + u64 val) +{ + debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset, + val); + writeq(val, nix_af->nix_af_base + offset); +} + +static inline u64 nix_pf_reg_read(struct nix *nix, u64 offset) +{ + u64 val = readq(nix->nix_base + offset); + + debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset, + val); + return val; +} + +static inline void nix_pf_reg_write(struct nix *nix, u64 offset, + u64 val) +{ + debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset, + val); + writeq(val, nix->nix_base + offset); +} + +static inline u64 npa_af_reg_read(struct npa_af *npa_af, u64 offset) +{ + u64 val = readq(npa_af->npa_af_base + offset); + + debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset, + val); + return val; +} + +static inline void npa_af_reg_write(struct npa_af *npa_af, u64 offset, + u64 val) +{ + debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset, + val); + writeq(val, npa_af->npa_af_base + offset); +} + +static inline u64 npc_af_reg_read(struct nix_af *nix_af, u64 offset) +{ + u64 val = readq(nix_af->npc_af_base + offset); + + debug("%s reg %p val %llx\n", __func__, 
nix_af->npc_af_base + offset, + val); + return val; +} + +static inline void npc_af_reg_write(struct nix_af *nix_af, u64 offset, + u64 val) +{ + debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset, + val); + writeq(val, nix_af->npc_af_base + offset); +} + +int npa_attach_aura(struct nix_af *nix_af, int lf, + const union npa_aura_s *desc, u32 aura_id); +int npa_attach_pool(struct nix_af *nix_af, int lf, + const union npa_pool_s *desc, u32 pool_id); +int npa_af_setup(struct npa_af *npa_af); +int npa_af_shutdown(struct npa_af *npa_af); +int npa_lf_setup(struct nix *nix); +int npa_lf_shutdown(struct nix *nix); +int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base); +int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count); + +int npc_lf_admin_setup(struct nix *nix); +int npc_af_shutdown(struct nix_af *nix_af); + +int nix_af_setup(struct nix_af *nix_af); +int nix_af_shutdown(struct nix_af *nix_af); +int nix_lf_setup(struct nix *nix); +int nix_lf_shutdown(struct nix *nix); +struct nix *nix_lf_alloc(struct udevice *dev); +int nix_lf_admin_setup(struct nix *nix); +int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf, + u32 cq_count, u32 rq_count, u32 sq_count); +struct rvu_af *get_af(void); + +int nix_lf_setup_mac(struct udevice *dev); +int nix_lf_read_rom_mac(struct udevice *dev); +void nix_lf_halt(struct udevice *dev); +int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len); +int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp); +int nix_lf_init(struct udevice *dev); +int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len); + +#endif /* __NIX_H__ */ diff --git a/drivers/net/octeontx2/nix_af.c b/drivers/net/octeontx2/nix_af.c new file mode 100644 index 00000000000..d513917ee75 --- /dev/null +++ b/drivers/net/octeontx2/nix_af.c @@ -0,0 +1,1102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ */ + +#include <dm.h> +#include <errno.h> +#include <malloc.h> +#include <memalign.h> +#include <misc.h> +#include <net.h> +#include <pci.h> +#include <watchdog.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/log2.h> +#include <asm/arch/board.h> +#include <asm/arch/csrs/csrs-npc.h> +#include <asm/arch/csrs/csrs-lmt.h> +#include <asm/io.h> + +#include "nix.h" +#include "lmt.h" +#include "cgx.h" + +static struct nix_aq_cq_dis cq_dis ALIGNED; +static struct nix_aq_rq_dis rq_dis ALIGNED; +static struct nix_aq_sq_dis sq_dis ALIGNED; + +/*************** + * NPA API + ***************/ +int npa_attach_aura(struct nix_af *nix_af, int lf, + const union npa_aura_s *desc, u32 aura_id) +{ + struct npa_af *npa = nix_af->npa_af; + union npa_aq_inst_s *inst; + union npa_aq_res_s *res; + union npa_af_aq_status aq_stat; + union npa_aura_s *context; + u64 head; + ulong start; + + debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, aura_id); + aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS()); + head = aq_stat.s.head_ptr; + inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head; + res = (union npa_aq_res_s *)(npa->aq.res.base); + + memset(inst, 0, sizeof(*inst)); + inst->s.lf = lf; + inst->s.doneint = 0; + inst->s.ctype = NPA_AQ_CTYPE_E_AURA; + inst->s.op = NPA_AQ_INSTOP_E_INIT; + inst->s.res_addr = npa->aq.res.iova; + inst->s.cindex = aura_id; + + context = (union npa_aura_s *)(npa->aq.res.base + + CONFIG_SYS_CACHELINE_SIZE); + memset(npa->aq.res.base, 0, npa->aq.res.entry_sz); + memcpy(context, desc, sizeof(union npa_aura_s)); + __iowmb(); + npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1); + + start = get_timer(0); + while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) && + (get_timer(start) < 1000)) + WATCHDOG_RESET(); + if (res->s.compcode != NPA_AQ_COMP_E_GOOD) { + printf("%s: Error: result 0x%x not good\n", + __func__, res->s.compcode); + return -1; + } + + return 0; +} + +int npa_attach_pool(struct nix_af *nix_af, int lf, + const union npa_pool_s *desc, u32 pool_id) +{ + union npa_aq_inst_s *inst; + union npa_aq_res_s *res; + union npa_af_aq_status aq_stat; + struct npa_af *npa = nix_af->npa_af; + union npa_pool_s *context; + u64 head; + ulong start; + + debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, pool_id); + aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS()); + head = aq_stat.s.head_ptr; + + inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head; + res = (union npa_aq_res_s *)(npa->aq.res.base); + + memset(inst, 0, sizeof(*inst)); + inst->s.cindex = pool_id; + inst->s.lf = lf; + inst->s.doneint = 0; + inst->s.ctype = NPA_AQ_CTYPE_E_POOL; + inst->s.op = NPA_AQ_INSTOP_E_INIT; + inst->s.res_addr = npa->aq.res.iova; + + context = (union npa_pool_s *)(npa->aq.res.base + + CONFIG_SYS_CACHELINE_SIZE); + memset(npa->aq.res.base, 0, npa->aq.res.entry_sz); + memcpy(context, desc, sizeof(union npa_pool_s)); + __iowmb(); + npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1); + + start = get_timer(0); + while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) && + (get_timer(start) < 1000)) + WATCHDOG_RESET(); + + if (res->s.compcode != NPA_AQ_COMP_E_GOOD) { + printf("%s: Error: result 0x%x not good\n", + __func__, res->s.compcode); + return -1; + } + + return 0; +} + +int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base) +{ + union npa_af_lf_rst lf_rst; + union npa_af_lfx_auras_cfg auras_cfg; + struct npa_af *npa_af = npa->npa_af; + + debug("%s(%p, %d, 0x%llx)\n", __func__, npa_af, lf, aura_base); + lf_rst.u = 0; + lf_rst.s.exec = 1; + lf_rst.s.lf = lf;
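+ /* Trigger the LF reset; hardware clears the exec bit once the reset completes, which the loop below polls for */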
npa_af_reg_write(npa_af, NPA_AF_LF_RST(), lf_rst.u); + + do { + lf_rst.u = npa_af_reg_read(npa_af, NPA_AF_LF_RST()); + WATCHDOG_RESET(); + } while (lf_rst.s.exec); + + /* Set Aura size and enable caching of contexts */ + auras_cfg.u = npa_af_reg_read(npa_af, NPA_AF_LFX_AURAS_CFG(lf)); + auras_cfg.s.loc_aura_size = NPA_AURA_SIZE_DEFAULT; //FIXME aura_size; + auras_cfg.s.caching = 1; + auras_cfg.s.rmt_aura_size = 0; + auras_cfg.s.rmt_aura_offset = 0; + auras_cfg.s.rmt_lf = 0; + npa_af_reg_write(npa_af, NPA_AF_LFX_AURAS_CFG(lf), auras_cfg.u); + /* Configure aura HW context base */ + npa_af_reg_write(npa_af, NPA_AF_LFX_LOC_AURAS_BASE(lf), + aura_base); + + return 0; +} + +int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count) +{ + int pool_id; + u32 head; + union npa_aq_inst_s *inst; + union npa_aq_res_s *res; + struct npa_aq_pool_request { + union npa_aq_res_s resp ALIGNED; + union npa_pool_s p0 ALIGNED; + union npa_pool_s p1 ALIGNED; + } pool_req ALIGNED; + struct npa_aq_aura_request { + union npa_aq_res_s resp ALIGNED; + union npa_aura_s a0 ALIGNED; + union npa_aura_s a1 ALIGNED; + } aura_req ALIGNED; + union npa_af_aq_status aq_stat; + union npa_af_lf_rst lf_rst; + struct npa_af *npa = nix_af->npa_af; + ulong start; + + for (pool_id = 0; pool_id < pool_count; pool_id++) { + aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS()); + head = aq_stat.s.head_ptr; + inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head; + res = &pool_req.resp; + + memset(inst, 0, sizeof(*inst)); + inst->s.cindex = pool_id; + inst->s.lf = lf; + inst->s.doneint = 0; + inst->s.ctype = NPA_AQ_CTYPE_E_POOL; + inst->s.op = NPA_AQ_INSTOP_E_WRITE; + inst->s.res_addr = (u64)&pool_req.resp; + + memset((void *)&pool_req, 0, sizeof(pool_req)); + pool_req.p0.s.ena = 0; + pool_req.p1.s.ena = 1; /* Write mask */ + __iowmb(); + + npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1); + + start = get_timer(0); + while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) && + (get_timer(start) < 1000)) + WATCHDOG_RESET(); + + if (res->s.compcode != NPA_AQ_COMP_E_GOOD) { + printf("%s: Error: result 0x%x not good for lf %d\n" + " aura id %d", __func__, res->s.compcode, lf, + pool_id); + return -1; + } + debug("%s(LF %d, pool id %d) disabled\n", __func__, lf, + pool_id); + } + + for (pool_id = 0; pool_id < pool_count; pool_id++) { + aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS()); + head = aq_stat.s.head_ptr; + inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head; + res = &aura_req.resp; + + memset(inst, 0, sizeof(*inst)); + inst->s.cindex = pool_id; + inst->s.lf = lf; + inst->s.doneint = 0; + inst->s.ctype = NPA_AQ_CTYPE_E_AURA; + inst->s.op = NPA_AQ_INSTOP_E_WRITE; + inst->s.res_addr = (u64)&aura_req.resp; + + memset((void *)&aura_req, 0, sizeof(aura_req)); + aura_req.a0.s.ena = 0; + aura_req.a1.s.ena = 1; /* Write mask */ + __iowmb(); + + npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1); + + start = get_timer(0); + while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) && + (get_timer(start) < 1000)) + WATCHDOG_RESET(); + + if (res->s.compcode != NPA_AQ_COMP_E_GOOD) { + printf("%s: Error: result 0x%x not good for lf %d\n" + " aura id %d", __func__, res->s.compcode, lf, + pool_id); + return -1; + } + debug("%s(LF %d, aura id %d) disabled\n", __func__, lf, + pool_id); + } + + /* Reset the LF */ + lf_rst.u = 0; + lf_rst.s.exec = 1; + lf_rst.s.lf = lf; + npa_af_reg_write(npa, NPA_AF_LF_RST(), lf_rst.u); + + do { + lf_rst.u = npa_af_reg_read(npa, NPA_AF_LF_RST()); + WATCHDOG_RESET(); + } while (lf_rst.s.exec); + + return 0; +} + +int 
npa_af_setup(struct npa_af *npa_af) +{ + int err; + union npa_af_gen_cfg npa_cfg; + union npa_af_ndc_cfg ndc_cfg; + union npa_af_aq_cfg aq_cfg; + union npa_af_blk_rst blk_rst; + + err = rvu_aq_alloc(&npa_af->aq, Q_COUNT(AQ_SIZE), + sizeof(union npa_aq_inst_s), + sizeof(union npa_aq_res_s)); + if (err) { + printf("%s: Error %d allocating admin queue\n", __func__, err); + return err; + } + debug("%s: NPA admin queue allocated at %p %llx\n", __func__, + npa_af->aq.inst.base, npa_af->aq.inst.iova); + + blk_rst.u = 0; + blk_rst.s.rst = 1; + npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u); + + /* Wait for reset to complete */ + do { + blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST()); + WATCHDOG_RESET(); + } while (blk_rst.s.busy); + + /* Set little Endian */ + npa_cfg.u = npa_af_reg_read(npa_af, NPA_AF_GEN_CFG()); + npa_cfg.s.af_be = 0; + npa_af_reg_write(npa_af, NPA_AF_GEN_CFG(), npa_cfg.u); + /* Enable NDC cache */ + ndc_cfg.u = npa_af_reg_read(npa_af, NPA_AF_NDC_CFG()); + ndc_cfg.s.ndc_bypass = 0; + npa_af_reg_write(npa_af, NPA_AF_NDC_CFG(), ndc_cfg.u); + /* Set up queue size */ + aq_cfg.u = npa_af_reg_read(npa_af, NPA_AF_AQ_CFG()); + aq_cfg.s.qsize = AQ_SIZE; + npa_af_reg_write(npa_af, NPA_AF_AQ_CFG(), aq_cfg.u); + /* Set up queue base address */ + npa_af_reg_write(npa_af, NPA_AF_AQ_BASE(), npa_af->aq.inst.iova); + + return 0; +} + +int npa_af_shutdown(struct npa_af *npa_af) +{ + union npa_af_blk_rst blk_rst; + + blk_rst.u = 0; + blk_rst.s.rst = 1; + npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u); + + /* Wait for reset to complete */ + do { + blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST()); + WATCHDOG_RESET(); + } while (blk_rst.s.busy); + + rvu_aq_free(&npa_af->aq); + + debug("%s: npa af reset --\n", __func__); + + return 0; +} + +/*************** + * NIX API + ***************/ +/** + * Setup SMQ -> TL4 -> TL3 -> TL2 -> TL1 -> MAC mapping + * + * @param nix Handle to setup + * + * @return 0, or negative on failure + */ +static int nix_af_setup_sq(struct nix *nix) +{ + union nixx_af_tl1x_schedule tl1_sched; + union nixx_af_tl2x_parent tl2_parent; + union nixx_af_tl3x_parent tl3_parent; + union nixx_af_tl3_tl2x_cfg tl3_tl2_cfg; + union nixx_af_tl3_tl2x_linkx_cfg tl3_tl2_link_cfg; + union nixx_af_tl4x_parent tl4_parent; + union nixx_af_tl4x_sdp_link_cfg tl4_sdp_link_cfg; + union nixx_af_smqx_cfg smq_cfg; + union nixx_af_mdqx_schedule mdq_sched; + union nixx_af_mdqx_parent mdq_parent; + union nixx_af_rx_linkx_cfg link_cfg; + int tl1_index = nix->lmac->link_num; /* NIX_LINK_E enum */ + int tl2_index = tl1_index; + int tl3_index = tl2_index; + int tl4_index = tl3_index; + int smq_index = tl4_index; + struct nix_af *nix_af = nix->nix_af; + u64 offset = 0; + + tl1_sched.u = nix_af_reg_read(nix_af, + NIXX_AF_TL1X_SCHEDULE(tl1_index)); + tl1_sched.s.rr_quantum = MAX_MTU; + nix_af_reg_write(nix_af, NIXX_AF_TL1X_SCHEDULE(tl1_index), + tl1_sched.u); + + tl2_parent.u = nix_af_reg_read(nix_af, + NIXX_AF_TL2X_PARENT(tl2_index)); + tl2_parent.s.parent = tl1_index; + nix_af_reg_write(nix_af, NIXX_AF_TL2X_PARENT(tl2_index), + tl2_parent.u); + + tl3_parent.u = nix_af_reg_read(nix_af, + NIXX_AF_TL3X_PARENT(tl3_index)); + tl3_parent.s.parent = tl2_index; + nix_af_reg_write(nix_af, NIXX_AF_TL3X_PARENT(tl3_index), + tl3_parent.u); + tl3_tl2_cfg.u = nix_af_reg_read(nix_af, + NIXX_AF_TL3_TL2X_CFG(tl3_index)); + tl3_tl2_cfg.s.express = 0; + nix_af_reg_write(nix_af, NIXX_AF_TL3_TL2X_CFG(tl3_index), + tl3_tl2_cfg.u); + + offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index, + nix->lmac->link_num); + 
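+ /* Bind this TL3 node to the LMAC's link: enable the link mapping with backpressure on and no channel relabeling */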
tl3_tl2_link_cfg.u = nix_af_reg_read(nix_af, offset); + tl3_tl2_link_cfg.s.bp_ena = 1; + tl3_tl2_link_cfg.s.ena = 1; + tl3_tl2_link_cfg.s.relchan = 0; + offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index, + nix->lmac->link_num); + nix_af_reg_write(nix_af, offset, tl3_tl2_link_cfg.u); + + tl4_parent.u = nix_af_reg_read(nix_af, + NIXX_AF_TL4X_PARENT(tl4_index)); + tl4_parent.s.parent = tl3_index; + nix_af_reg_write(nix_af, NIXX_AF_TL4X_PARENT(tl4_index), + tl4_parent.u); + + offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index); + tl4_sdp_link_cfg.u = nix_af_reg_read(nix_af, offset); + tl4_sdp_link_cfg.s.bp_ena = 0; + tl4_sdp_link_cfg.s.ena = 0; + tl4_sdp_link_cfg.s.relchan = 0; + offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index); + nix_af_reg_write(nix_af, offset, tl4_sdp_link_cfg.u); + + smq_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_SMQX_CFG(smq_index)); + smq_cfg.s.express = 0; + smq_cfg.s.lf = nix->lf; + smq_cfg.s.desc_shp_ctl_dis = 1; + smq_cfg.s.maxlen = MAX_MTU; + smq_cfg.s.minlen = NIX_MIN_HW_MTU; + nix_af_reg_write(nix_af, NIXX_AF_SMQX_CFG(smq_index), smq_cfg.u); + + mdq_sched.u = nix_af_reg_read(nix_af, + NIXX_AF_MDQX_SCHEDULE(smq_index)); + mdq_sched.s.rr_quantum = MAX_MTU; + offset = NIXX_AF_MDQX_SCHEDULE(smq_index); + nix_af_reg_write(nix_af, offset, mdq_sched.u); + mdq_parent.u = nix_af_reg_read(nix_af, + NIXX_AF_MDQX_PARENT(smq_index)); + mdq_parent.s.parent = tl4_index; + nix_af_reg_write(nix_af, NIXX_AF_MDQX_PARENT(smq_index), + mdq_parent.u); + + link_cfg.u = 0; + link_cfg.s.maxlen = NIX_MAX_HW_MTU; + link_cfg.s.minlen = NIX_MIN_HW_MTU; + nix_af_reg_write(nix->nix_af, + NIXX_AF_RX_LINKX_CFG(nix->lmac->link_num), + link_cfg.u); + + return 0; +} + +/** + * Issue a command to the NIX AF Admin Queue + * + * @param nix nix handle + * @param lf Logical function number for command + * @param op Operation + * @param ctype Context type + * @param cindex Context index + * @param resp Result pointer + * + * @return 0 for success, -EBUSY on failure + */ +static int nix_aq_issue_command(struct nix_af *nix_af, + int lf, + int op, + int ctype, + int cindex, union nix_aq_res_s *resp) +{ + union nixx_af_aq_status aq_status; + union nix_aq_inst_s *aq_inst; + union nix_aq_res_s *result = resp; + ulong start; + + debug("%s(%p, 0x%x, 0x%x, 0x%x, 0x%x, %p)\n", __func__, nix_af, lf, + op, ctype, cindex, resp); + aq_status.u = nix_af_reg_read(nix_af, NIXX_AF_AQ_STATUS()); + aq_inst = (union nix_aq_inst_s *)(nix_af->aq.inst.base) + + aq_status.s.head_ptr; + aq_inst->u[0] = 0; + aq_inst->u[1] = 0; + aq_inst->s.op = op; + aq_inst->s.ctype = ctype; + aq_inst->s.lf = lf; + aq_inst->s.cindex = cindex; + aq_inst->s.doneint = 0; + aq_inst->s.res_addr = (u64)resp; + debug("%s: inst@%p: 0x%llx 0x%llx\n", __func__, aq_inst, + aq_inst->u[0], aq_inst->u[1]); + __iowmb(); + + /* Ring doorbell and wait for result */ + nix_af_reg_write(nix_af, NIXX_AF_AQ_DOOR(), 1); + + start = get_timer(0); + /* Wait for completion */ + do { + WATCHDOG_RESET(); + dsb(); + } while (result->s.compcode == 0 && get_timer(start) < 2); + + if (result->s.compcode != NIX_AQ_COMP_E_GOOD) { + printf("NIX:AQ fail or time out with code %d after %ld ms\n", + result->s.compcode, get_timer(start)); + return -EBUSY; + } + return 0; +} + +static int nix_attach_receive_queue(struct nix_af *nix_af, int lf) +{ + struct nix_aq_rq_request rq_req ALIGNED; + int err; + + debug("%s(%p, %d)\n", __func__, nix_af, lf); + + memset(&rq_req, 0, sizeof(struct nix_aq_rq_request)); + + rq_req.rq.s.ena = 1; + rq_req.rq.s.spb_ena = 1; + rq_req.rq.s.ipsech_ena = 0; + 
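+ /* U-Boot keeps RX simple: no WQE aura, both small (spb) and large (lpb) buffers come from the single NPA_POOL_RX aura, and completions post to NIX_CQ_RX (fields set below) */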
rq_req.rq.s.ena_wqwd = 0;
+	rq_req.rq.s.cq = NIX_CQ_RX;
+	rq_req.rq.s.substream = 0;	/* FIXME: Substream IDs? */
+	rq_req.rq.s.wqe_aura = -1;	/* No WQE aura */
+	rq_req.rq.s.spb_aura = NPA_POOL_RX;
+	rq_req.rq.s.lpb_aura = NPA_POOL_RX;
+	/* U-Boot doesn't use WQE group for anything */
+	rq_req.rq.s.pb_caching = 1;
+	rq_req.rq.s.xqe_drop_ena = 0;	/* Disable RED dropping */
+	rq_req.rq.s.spb_drop_ena = 0;
+	rq_req.rq.s.lpb_drop_ena = 0;
+	rq_req.rq.s.spb_sizem1 = (MAX_MTU / (3 * 8)) - 1; /* 512 bytes */
+	rq_req.rq.s.lpb_sizem1 = (MAX_MTU / 8) - 1;
+	rq_req.rq.s.first_skip = 0;
+	rq_req.rq.s.later_skip = 0;
+	rq_req.rq.s.xqe_imm_copy = 0;
+	rq_req.rq.s.xqe_hdr_split = 0;
+	rq_req.rq.s.xqe_drop = 0;
+	rq_req.rq.s.xqe_pass = 0;
+	rq_req.rq.s.wqe_pool_drop = 0;	/* No WQE pool */
+	rq_req.rq.s.wqe_pool_pass = 0;	/* No WQE pool */
+	rq_req.rq.s.spb_aura_drop = 255;
+	rq_req.rq.s.spb_aura_pass = 255;
+	rq_req.rq.s.spb_pool_drop = 0;
+	rq_req.rq.s.spb_pool_pass = 0;
+	rq_req.rq.s.lpb_aura_drop = 255;
+	rq_req.rq.s.lpb_aura_pass = 255;
+	rq_req.rq.s.lpb_pool_drop = 0;
+	rq_req.rq.s.lpb_pool_pass = 0;
+	rq_req.rq.s.qint_idx = 0;
+
+	err = nix_aq_issue_command(nix_af, lf,
+				   NIX_AQ_INSTOP_E_INIT,
+				   NIX_AQ_CTYPE_E_RQ,
+				   0, &rq_req.resp);
+	if (err) {
+		printf("%s: Error requesting receive queue\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+static int nix_attach_send_queue(struct nix *nix)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	struct nix_aq_sq_request sq_req ALIGNED;
+	int err;
+
+	debug("%s(%p)\n", __func__, nix_af);
+	err = nix_af_setup_sq(nix);
+	if (err)
+		return err;
+
+	memset(&sq_req, 0, sizeof(sq_req));
+
+	sq_req.sq.s.ena = 1;
+	sq_req.sq.s.cq_ena = 1;
+	sq_req.sq.s.max_sqe_size = NIX_MAXSQESZ_E_W16;
+	sq_req.sq.s.substream = 0;	/* FIXME: Substream IDs? */
+	sq_req.sq.s.sdp_mcast = 0;
+	sq_req.sq.s.cq = NIX_CQ_TX;
+	sq_req.sq.s.cq_limit = 0;
+	sq_req.sq.s.smq = nix->lmac->link_num;	/* scheduling index */
+	sq_req.sq.s.sso_ena = 0;
+	sq_req.sq.s.smq_rr_quantum = MAX_MTU / 4;
+	sq_req.sq.s.default_chan = nix->lmac->chan_num;
+	sq_req.sq.s.sqe_stype = NIX_STYPE_E_STP;
+	sq_req.sq.s.qint_idx = 0;
+	sq_req.sq.s.sqb_aura = NPA_POOL_SQB;
+
+	err = nix_aq_issue_command(nix_af, nix->lf,
+				   NIX_AQ_INSTOP_E_INIT,
+				   NIX_AQ_CTYPE_E_SQ,
+				   0, &sq_req.resp);
+	if (err) {
+		printf("%s: Error requesting send queue\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+static int nix_attach_completion_queue(struct nix *nix, int cq_idx)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	struct nix_aq_cq_request cq_req ALIGNED;
+	int err;
+
+	debug("%s(%p)\n", __func__, nix_af);
+	memset(&cq_req, 0, sizeof(cq_req));
+	cq_req.cq.s.ena = 1;
+	cq_req.cq.s.bpid = nix->lmac->pknd;
+	cq_req.cq.s.substream = 0;	/* FIXME: Substream IDs?
*/ + cq_req.cq.s.drop_ena = 0; + cq_req.cq.s.caching = 1; + cq_req.cq.s.qsize = CQS_QSIZE; + cq_req.cq.s.drop = 255 * 7 / 8; + cq_req.cq.s.qint_idx = 0; + cq_req.cq.s.cint_idx = 0; + cq_req.cq.s.base = nix->cq[cq_idx].iova; + debug("%s: CQ(%d) base %p\n", __func__, cq_idx, + nix->cq[cq_idx].base); + + err = nix_aq_issue_command(nix_af, nix->lf, + NIX_AQ_INSTOP_E_INIT, + NIX_AQ_CTYPE_E_CQ, + cq_idx, &cq_req.resp); + if (err) { + printf("%s: Error requesting completion queue\n", __func__); + return err; + } + debug("%s: CQ(%d) allocated, base %p\n", __func__, cq_idx, + nix->cq[cq_idx].base); + + return 0; +} + +int nix_lf_admin_setup(struct nix *nix) +{ + union nixx_af_lfx_rqs_cfg rqs_cfg; + union nixx_af_lfx_sqs_cfg sqs_cfg; + union nixx_af_lfx_cqs_cfg cqs_cfg; + union nixx_af_lfx_rss_cfg rss_cfg; + union nixx_af_lfx_cints_cfg cints_cfg; + union nixx_af_lfx_qints_cfg qints_cfg; + union nixx_af_lfx_rss_grpx rss_grp; + union nixx_af_lfx_tx_cfg2 tx_cfg2; + union nixx_af_lfx_cfg lfx_cfg; + union nixx_af_lf_rst lf_rst; + u32 index; + struct nix_af *nix_af = nix->nix_af; + int err; + + /* Reset the LF */ + lf_rst.u = 0; + lf_rst.s.lf = nix->lf; + lf_rst.s.exec = 1; + nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u); + + do { + lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST()); + WATCHDOG_RESET(); + } while (lf_rst.s.exec); + + /* Config NIX RQ HW context and base*/ + nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_BASE(nix->lf), + (u64)nix->rq_ctx_base); + /* Set caching and queue count in HW */ + rqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf)); + rqs_cfg.s.caching = 1; + rqs_cfg.s.max_queuesm1 = nix->rq_cnt - 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf), rqs_cfg.u); + + /* Config NIX SQ HW context and base*/ + nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_BASE(nix->lf), + (u64)nix->sq_ctx_base); + sqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf)); + sqs_cfg.s.caching = 1; + sqs_cfg.s.max_queuesm1 = nix->sq_cnt - 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf), sqs_cfg.u); + + /* Config NIX CQ HW context and base*/ + nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_BASE(nix->lf), + (u64)nix->cq_ctx_base); + cqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf)); + cqs_cfg.s.caching = 1; + cqs_cfg.s.max_queuesm1 = nix->cq_cnt - 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf), cqs_cfg.u); + + /* Config NIX RSS HW context and base */ + nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_BASE(nix->lf), + (u64)nix->rss_base); + rss_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf)); + rss_cfg.s.ena = 1; + rss_cfg.s.size = ilog2(nix->rss_sz) / 256; + nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf), rss_cfg.u); + + for (index = 0; index < nix->rss_grps; index++) { + rss_grp.u = 0; + rss_grp.s.sizem1 = 0x7; + rss_grp.s.offset = nix->rss_sz * index; + nix_af_reg_write(nix_af, + NIXX_AF_LFX_RSS_GRPX(nix->lf, index), + rss_grp.u); + } + + /* Config CQints HW contexts and base */ + nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_BASE(nix->lf), + (u64)nix->cint_base); + cints_cfg.u = nix_af_reg_read(nix_af, + NIXX_AF_LFX_CINTS_CFG(nix->lf)); + cints_cfg.s.caching = 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_CFG(nix->lf), + cints_cfg.u); + + /* Config Qints HW context and base */ + nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_BASE(nix->lf), + (u64)nix->qint_base); + qints_cfg.u = nix_af_reg_read(nix_af, + NIXX_AF_LFX_QINTS_CFG(nix->lf)); + qints_cfg.s.caching = 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_CFG(nix->lf), + qints_cfg.u); + 
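	/*
+	 * Each context type above (RQ/SQ/CQ/RSS, plus the CINT/QINT
+	 * interrupt contexts) follows the same pattern: program the LF's
+	 * context base IOVA, then set caching (and, where present, the
+	 * queue count or size) in the matching CFG register.
+	 */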
+ debug("%s(%p, %d, %d)\n", __func__, nix_af, nix->lf, nix->pf); + + /* Enable LMTST for this NIX LF */ + tx_cfg2.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf)); + tx_cfg2.s.lmt_ena = 1; + nix_af_reg_write(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf), tx_cfg2.u); + + /* Use 16-word XQEs, write the npa pf_func number only */ + lfx_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CFG(nix->lf)); + lfx_cfg.s.xqe_size = NIX_XQESZ_E_W16; + lfx_cfg.s.npa_pf_func = nix->pf_func; + nix_af_reg_write(nix_af, NIXX_AF_LFX_CFG(nix->lf), lfx_cfg.u); + + nix_af_reg_write(nix_af, NIXX_AF_LFX_RX_CFG(nix->lf), 0); + + for (index = 0; index < nix->cq_cnt; index++) { + err = nix_attach_completion_queue(nix, index); + if (err) { + printf("%s: Error attaching completion queue %d\n", + __func__, index); + return err; + } + } + + for (index = 0; index < nix->rq_cnt; index++) { + err = nix_attach_receive_queue(nix_af, nix->lf); + if (err) { + printf("%s: Error attaching receive queue %d\n", + __func__, index); + return err; + } + } + + for (index = 0; index < nix->sq_cnt; index++) { + err = nix_attach_send_queue(nix); + if (err) { + printf("%s: Error attaching send queue %d\n", + __func__, index); + return err; + } + } + + return 0; +} + +int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf, + u32 cq_count, u32 rq_count, u32 sq_count) +{ + union nixx_af_rx_sw_sync sw_sync; + union nixx_af_lf_rst lf_rst; + int index, err; + + /* Flush all tx packets */ + sw_sync.u = 0; + sw_sync.s.ena = 1; + nix_af_reg_write(nix_af, NIXX_AF_RX_SW_SYNC(), sw_sync.u); + + do { + sw_sync.u = nix_af_reg_read(nix_af, NIXX_AF_RX_SW_SYNC()); + WATCHDOG_RESET(); + } while (sw_sync.s.ena); + + for (index = 0; index < rq_count; index++) { + memset((void *)&rq_dis, 0, sizeof(rq_dis)); + rq_dis.rq.s.ena = 0; /* Context */ + rq_dis.mrq.s.ena = 1; /* Mask */ + __iowmb(); + + err = nix_aq_issue_command(nix_af, lf, + NIX_AQ_INSTOP_E_WRITE, + NIX_AQ_CTYPE_E_RQ, + index, &rq_dis.resp); + if (err) { + printf("%s: Error disabling LF %d RQ(%d)\n", + __func__, lf, index); + return err; + } + debug("%s: LF %d RQ(%d) disabled\n", __func__, lf, index); + } + + for (index = 0; index < sq_count; index++) { + memset((void *)&sq_dis, 0, sizeof(sq_dis)); + sq_dis.sq.s.ena = 0; /* Context */ + sq_dis.msq.s.ena = 1; /* Mask */ + __iowmb(); + + err = nix_aq_issue_command(nix_af, lf, + NIX_AQ_INSTOP_E_WRITE, + NIX_AQ_CTYPE_E_SQ, + index, &sq_dis.resp); + if (err) { + printf("%s: Error disabling LF %d SQ(%d)\n", + __func__, lf, index); + return err; + } + debug("%s: LF %d SQ(%d) disabled\n", __func__, lf, index); + } + + for (index = 0; index < cq_count; index++) { + memset((void *)&cq_dis, 0, sizeof(cq_dis)); + cq_dis.cq.s.ena = 0; /* Context */ + cq_dis.mcq.s.ena = 1; /* Mask */ + __iowmb(); + + err = nix_aq_issue_command(nix_af, lf, + NIX_AQ_INSTOP_E_WRITE, + NIX_AQ_CTYPE_E_CQ, + index, &cq_dis.resp); + if (err) { + printf("%s: Error disabling LF %d CQ(%d)\n", + __func__, lf, index); + return err; + } + debug("%s: LF %d CQ(%d) disabled\n", __func__, lf, index); + } + + /* Reset the LF */ + lf_rst.u = 0; + lf_rst.s.lf = lf; + lf_rst.s.exec = 1; + nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u); + + do { + lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST()); + WATCHDOG_RESET(); + } while (lf_rst.s.exec); + + return 0; +} + +int npc_lf_admin_setup(struct nix *nix) +{ + union npc_af_const af_const; + union npc_af_pkindx_action0 action0; + union npc_af_pkindx_action1 action1; + union npc_af_intfx_kex_cfg kex_cfg; + union npc_af_intfx_miss_stat_act 
intfx_stat_act; + union npc_af_mcamex_bankx_camx_intf camx_intf; + union npc_af_mcamex_bankx_camx_w0 camx_w0; + union npc_af_mcamex_bankx_cfg bankx_cfg; + union npc_af_mcamex_bankx_stat_act mcamex_stat_act; + + union nix_rx_action_s rx_action; + union nix_tx_action_s tx_action; + + struct nix_af *nix_af = nix->nix_af; + u32 kpus; + int pkind = nix->lmac->link_num; + int index; + u64 offset; + + debug("%s(%p, pkind 0x%x)\n", __func__, nix_af, pkind); + af_const.u = npc_af_reg_read(nix_af, NPC_AF_CONST()); + kpus = af_const.s.kpus; + + action0.u = 0; + action0.s.parse_done = 1; + npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION0(pkind), action0.u); + + action1.u = 0; + npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION1(pkind), action1.u); + + kex_cfg.u = 0; + kex_cfg.s.keyw = NPC_MCAMKEYW_E_X1; + kex_cfg.s.parse_nibble_ena = 0x7; + npc_af_reg_write(nix_af, + NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_RX(0)), + kex_cfg.u); + + /* HW Issue */ + kex_cfg.u = 0; + kex_cfg.s.parse_nibble_ena = 0x7; + npc_af_reg_write(nix_af, + NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_TX(0)), + kex_cfg.u); + + camx_intf.u = 0; + camx_intf.s.intf = ~NPC_INTF_E_NIXX_RX(0); + npc_af_reg_write(nix_af, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 0), + camx_intf.u); + + camx_intf.u = 0; + camx_intf.s.intf = NPC_INTF_E_NIXX_RX(0); + npc_af_reg_write(nix_af, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 1), + camx_intf.u); + + camx_w0.u = 0; + camx_w0.s.md = ~(nix->lmac->chan_num) & (~((~0x0ull) << 12)); + debug("NPC LF ADMIN camx_w0.u %llx\n", camx_w0.u); + npc_af_reg_write(nix_af, + NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 0), + camx_w0.u); + + camx_w0.u = 0; + camx_w0.s.md = nix->lmac->chan_num; + npc_af_reg_write(nix_af, + NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 1), + camx_w0.u); + + npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 0), + 0); + + npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 1), + 0); + + /* Enable stats for NPC INTF RX */ + mcamex_stat_act.u = 0; + mcamex_stat_act.s.ena = 1; + mcamex_stat_act.s.stat_sel = pkind; + npc_af_reg_write(nix_af, + NPC_AF_MCAMEX_BANKX_STAT_ACT(pkind, 0), + mcamex_stat_act.u); + intfx_stat_act.u = 0; + intfx_stat_act.s.ena = 1; + intfx_stat_act.s.stat_sel = 16; + offset = NPC_AF_INTFX_MISS_STAT_ACT(NPC_INTF_E_NIXX_RX(0)); + npc_af_reg_write(nix_af, offset, intfx_stat_act.u); + rx_action.u = 0; + rx_action.s.pf_func = nix->pf_func; + rx_action.s.op = NIX_RX_ACTIONOP_E_UCAST; + npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_ACTION(pkind, 0), + rx_action.u); + + for (index = 0; index < kpus; index++) + npc_af_reg_write(nix_af, NPC_AF_KPUX_CFG(index), 0); + + rx_action.u = 0; + rx_action.s.pf_func = nix->pf_func; + rx_action.s.op = NIX_RX_ACTIONOP_E_DROP; + npc_af_reg_write(nix_af, + NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_RX(0)), + rx_action.u); + bankx_cfg.u = 0; + bankx_cfg.s.ena = 1; + npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CFG(pkind, 0), + bankx_cfg.u); + + tx_action.u = 0; + tx_action.s.op = NIX_TX_ACTIONOP_E_UCAST_DEFAULT; + npc_af_reg_write(nix_af, + NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_TX(0)), + tx_action.u); + +#ifdef DEBUG + /* Enable debug capture on RX intf */ + npc_af_reg_write(nix_af, NPC_AF_DBG_CTL(), 0x4); +#endif + + return 0; +} + +int npc_af_shutdown(struct nix_af *nix_af) +{ + union npc_af_blk_rst blk_rst; + + blk_rst.u = 0; + blk_rst.s.rst = 1; + npc_af_reg_write(nix_af, NPC_AF_BLK_RST(), blk_rst.u); + + /* Wait for reset to complete */ + do { + blk_rst.u = npc_af_reg_read(nix_af, NPC_AF_BLK_RST()); + WATCHDOG_RESET(); + } while (blk_rst.s.busy); + + 
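	/*
+	 * Unlike the NPA and NIX teardown paths elsewhere in this file,
+	 * NPC has no admin queue to free: a block reset is all that is
+	 * needed here.
+	 */
+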
debug("%s: npc af reset --\n", __func__); + + return 0; +} + +int nix_af_setup(struct nix_af *nix_af) +{ + int err; + union nixx_af_const2 af_const2; + union nixx_af_const3 af_const3; + union nixx_af_sq_const sq_const; + union nixx_af_cfg af_cfg; + union nixx_af_status af_status; + union nixx_af_ndc_cfg ndc_cfg; + union nixx_af_aq_cfg aq_cfg; + union nixx_af_blk_rst blk_rst; + + debug("%s(%p)\n", __func__, nix_af); + err = rvu_aq_alloc(&nix_af->aq, Q_COUNT(AQ_SIZE), + sizeof(union nix_aq_inst_s), + sizeof(union nix_aq_res_s)); + if (err) { + printf("%s: Error allocating nix admin queue\n", __func__); + return err; + } + + blk_rst.u = 0; + blk_rst.s.rst = 1; + nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u); + + /* Wait for reset to complete */ + do { + blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST()); + WATCHDOG_RESET(); + } while (blk_rst.s.busy); + + /* Put in LE mode */ + af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG()); + if (af_cfg.s.force_cond_clk_en || af_cfg.s.calibrate_x2p || + af_cfg.s.force_intf_clk_en) { + printf("%s: Error: Invalid NIX_AF_CFG value 0x%llx\n", + __func__, af_cfg.u); + return -1; + } + af_cfg.s.af_be = 0; + af_cfg.u |= 0x5E; /* HW Issue */ + nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u); + + /* Perform Calibration */ + af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG()); + af_cfg.s.calibrate_x2p = 1; + nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u); + + /* Wait for calibration to complete */ + do { + af_status.u = nix_af_reg_read(nix_af, NIXX_AF_STATUS()); + WATCHDOG_RESET(); + } while (af_status.s.calibrate_done == 0); + + af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG()); + af_cfg.s.calibrate_x2p = 0; + nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u); + + /* Enable NDC cache */ + ndc_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_NDC_CFG()); + ndc_cfg.s.ndc_ign_pois = 0; + ndc_cfg.s.byp_sq = 0; + ndc_cfg.s.byp_sqb = 0; + ndc_cfg.s.byp_cqs = 0; + ndc_cfg.s.byp_cints = 0; + ndc_cfg.s.byp_dyno = 0; + ndc_cfg.s.byp_mce = 0; + ndc_cfg.s.byp_rqc = 0; + ndc_cfg.s.byp_rsse = 0; + ndc_cfg.s.byp_mc_data = 0; + ndc_cfg.s.byp_mc_wqe = 0; + ndc_cfg.s.byp_mr_data = 0; + ndc_cfg.s.byp_mr_wqe = 0; + ndc_cfg.s.byp_qints = 0; + nix_af_reg_write(nix_af, NIXX_AF_NDC_CFG(), ndc_cfg.u); + + /* Set up queue size */ + aq_cfg.u = 0; + aq_cfg.s.qsize = AQ_SIZE; + nix_af_reg_write(nix_af, NIXX_AF_AQ_CFG(), aq_cfg.u); + + /* Set up queue base address */ + nix_af_reg_write(nix_af, NIXX_AF_AQ_BASE(), nix_af->aq.inst.iova); + + af_const3.u = nix_af_reg_read(nix_af, NIXX_AF_CONST3()); + af_const2.u = nix_af_reg_read(nix_af, NIXX_AF_CONST2()); + sq_const.u = nix_af_reg_read(nix_af, NIXX_AF_SQ_CONST()); + nix_af->rq_ctx_sz = 1ULL << af_const3.s.rq_ctx_log2bytes; + nix_af->sq_ctx_sz = 1ULL << af_const3.s.sq_ctx_log2bytes; + nix_af->cq_ctx_sz = 1ULL << af_const3.s.cq_ctx_log2bytes; + nix_af->rsse_ctx_sz = 1ULL << af_const3.s.rsse_log2bytes; + nix_af->qints = af_const2.s.qints; + nix_af->cints = af_const2.s.cints; + nix_af->cint_ctx_sz = 1ULL << af_const3.s.cint_log2bytes; + nix_af->qint_ctx_sz = 1ULL << af_const3.s.qint_log2bytes; + nix_af->sqb_size = sq_const.s.sqb_size; + + return 0; +} + +int nix_af_shutdown(struct nix_af *nix_af) +{ + union nixx_af_blk_rst blk_rst; + + blk_rst.u = 0; + blk_rst.s.rst = 1; + nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u); + + /* Wait for reset to complete */ + do { + blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST()); + WATCHDOG_RESET(); + } while (blk_rst.s.busy); + + rvu_aq_free(&nix_af->aq); + + debug("%s: nix af reset --\n", 
__func__);
+
+	return 0;
+}
diff --git a/drivers/net/octeontx2/npc.h b/drivers/net/octeontx2/npc.h
new file mode 100644
index 00000000000..6e645cd32ea
--- /dev/null
+++ b/drivers/net/octeontx2/npc.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __NPC_H__
+#define __NPC_H__
+
+#define RSVD_MCAM_ENTRIES_PER_PF	2	/** Ucast and Bcast */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF	1	/** Ucast for VFs */
+
+struct npc_kpu_profile_cam {
+	u8 state;
+	u8 state_mask;
+	u16 dp0;
+	u16 dp0_mask;
+	u16 dp1;
+	u16 dp1_mask;
+	u16 dp2;
+	u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+	u8 errlev;
+	u8 errcode;
+	u8 dp0_offset;
+	u8 dp1_offset;
+	u8 dp2_offset;
+	u8 bypass_count;
+	u8 parse_done;
+	u8 next_state;
+	u8 ptr_advance;
+	u8 cap_ena;
+	u8 lid;
+	u8 ltype;
+	u8 flags;
+	u8 offset;
+	u8 mask;
+	u8 right;
+	u8 shift;
+};
+
+struct npc_kpu_profile {
+	int cam_entries;
+	int action_entries;
+	struct npc_kpu_profile_cam *cam;
+	struct npc_kpu_profile_action *action;
+};
+
+struct npc_pkind {
+	struct rsrc_bmap rsrc;
+	u32 *pfchan_map;
+};
+
+struct npc_mcam {
+	struct rsrc_bmap rsrc;
+	u16 *pfvf_map;
+	u16 total_entries;	/* Total number of MCAM entries */
+	u16 entries;		/* Total - reserved for NIX LFs */
+	u8 banks_per_entry;	/* Number of keywords in key */
+	u8 keysize;
+	u8 banks;		/* Number of MCAM banks */
+	u16 banksize;		/* Number of MCAM entries in each bank */
+	u16 counters;		/* Number of match counters */
+	u16 nixlf_offset;
+	u16 pf_offset;
+};
+
+struct nix_af_handle;
+struct nix_handle;
+struct rvu_hwinfo;
+
+struct npc_af {
+	struct nix_af_handle *nix_af;
+	struct npc_pkind pkind;
+	void __iomem *npc_af_base;
+	u8 npc_kpus;	/** Number of parser units */
+	struct npc_mcam mcam;
+	struct rvu_block block;
+	struct rvu_hwinfo *hw;
+};
+
+struct npc {
+	struct npc_af *npc_af;
+	void __iomem *npc_base;
+	struct nix_handle *nix;
+};
+
+#endif /* __NPC_H__ */
+
diff --git a/drivers/net/octeontx2/rvu.h b/drivers/net/octeontx2/rvu.h
new file mode 100644
index 00000000000..f455260a6fe
--- /dev/null
+++ b/drivers/net/octeontx2/rvu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __RVU_H__
+#define __RVU_H__
+
+#include <asm/arch/csrs/csrs-rvu.h>
+
+#define ALIGNED __aligned(CONFIG_SYS_CACHELINE_SIZE)
+
+#define Q_SIZE_16	0ULL /* 16 entries */
+#define Q_SIZE_64	1ULL /* 64 entries */
+#define Q_SIZE_256	2ULL
+#define Q_SIZE_1K	3ULL
+#define Q_SIZE_4K	4ULL
+#define Q_SIZE_16K	5ULL
+#define Q_SIZE_64K	6ULL
+#define Q_SIZE_256K	7ULL
+#define Q_SIZE_1M	8ULL /* Million entries */
+#define Q_SIZE_MIN	Q_SIZE_16
+#define Q_SIZE_MAX	Q_SIZE_1M
+
+#define Q_COUNT(x)	(16ULL << (2 * (x)))
+#define Q_SIZE(x, n)	((ilog2(x) - (n)) / 2)
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep queue size to its minimum.
+ */
+#define AQ_SIZE		Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK	0xFFFFF
+
+struct qmem {
+	void *base;
+	dma_addr_t iova;
+	size_t alloc_sz;
+	u32 qsize;
+	u8 entry_sz;
+};
+
+struct admin_queue {
+	struct qmem inst;
+	struct qmem res;
+};
+
+struct rvu_af {
+	struct udevice *dev;
+	void __iomem *af_base;
+	struct nix_af *nix_af;
+};
+
+struct rvu_pf {
+	struct udevice *dev;
+	struct udevice *afdev;
+	void __iomem *pf_base;
+	struct nix *nix;
+	u8 pfid;
+	int nix_lfid;
+	int npa_lfid;
+};
+
+/**
+ * Store 128 bit value
+ *
+ * @param[out] dest pointer to destination address
+ * @param val0 first 64 bits to write
+ * @param val1 second 64 bits to write
+ */
+static inline void st128(void *dest, u64 val0, u64 val1)
+{
+	__asm__ __volatile__("stp %x[x0], %x[x1], [%[pm]]" :
+			     : [x0]"r"(val0), [x1]"r"(val1), [pm]"r"(dest)
+			     : "memory");
+}
+
+/**
+ * Load 128 bit value
+ *
+ * @param[in] src pointer to 128 bits of data to load
+ * @param[out] val0 first 64 bits of data
+ * @param[out] val1 second 64 bits of data
+ */
+static inline void ld128(const u64 *src, u64 *val0, u64 *val1)
+{
+	/* val0/val1 are written by the load, so they must be outputs */
+	__asm__ __volatile__ ("ldp %x[x0], %x[x1], [%[pm]]"
+			      : [x0]"=r"(*val0), [x1]"=r"(*val1)
+			      : [pm]"r"(src));
+}
+
+void qmem_free(struct qmem *q);
+int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz);
+
+/**
+ * Allocates an admin queue for instructions and results
+ *
+ * @param aq	admin queue to allocate for
+ * @param qsize	Number of entries in the queue
+ * @param inst_size	Size of each instruction
+ * @param res_size	Size of each result
+ *
+ * @return -ENOMEM on error, 0 on success
+ */
+int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
+		 size_t inst_size, size_t res_size);
+
+/**
+ * Frees an admin queue
+ *
+ * @param aq	Admin queue to free
+ */
+void rvu_aq_free(struct admin_queue *aq);
+
+void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid);
+
+#endif /* __RVU_H__ */
+
diff --git a/drivers/net/octeontx2/rvu_af.c b/drivers/net/octeontx2/rvu_af.c
new file mode 100644
index 00000000000..7750089a205
--- /dev/null
+++ b/drivers/net/octeontx2/rvu_af.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <linux/list.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-npa.h>
+
+#include "nix.h"
+
+struct udevice *rvu_af_dev;
+
+inline struct rvu_af *get_af(void)
+{
+	return rvu_af_dev ? dev_get_priv(rvu_af_dev) : NULL;
+}
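+
+/*
+ * The RVU_LF_CFG_DEBUG registers used below act as a lookup interface:
+ * write the pf_func of interest with exec set, poll until exec clears,
+ * then lf_valid/lf report the LF slot bound to that PF in the NIX and
+ * NPA blocks respectively.
+ */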
+void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid)
+{
+	union nixx_af_rvu_lf_cfg_debug nix_lf_dbg;
+	union npa_af_rvu_lf_cfg_debug npa_lf_dbg;
+	union rvu_pf_func_s pf_func;
+	struct rvu_af *af = dev_get_priv(rvu_af_dev);
+	struct nix_af *nix_af = af->nix_af;
+
+	pf_func.u = 0;
+	pf_func.s.pf = pf;
+
+	nix_lf_dbg.u = 0;
+	nix_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
+	nix_lf_dbg.s.exec = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_RVU_LF_CFG_DEBUG(),
+			 nix_lf_dbg.u);
+	do {
+		nix_lf_dbg.u = nix_af_reg_read(nix_af,
+					       NIXX_AF_RVU_LF_CFG_DEBUG());
+	} while (nix_lf_dbg.s.exec);
+
+	if (nix_lf_dbg.s.lf_valid)
+		*nixid = nix_lf_dbg.s.lf;
+
+	debug("%s: nix lf_valid %d lf %d nixid %d\n", __func__,
+	      nix_lf_dbg.s.lf_valid, nix_lf_dbg.s.lf, *nixid);
+
+	npa_lf_dbg.u = 0;
+	npa_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
+	npa_lf_dbg.s.exec = 1;
+	npa_af_reg_write(nix_af->npa_af, NPA_AF_RVU_LF_CFG_DEBUG(),
+			 npa_lf_dbg.u);
+	do {
+		npa_lf_dbg.u = npa_af_reg_read(nix_af->npa_af,
+					       NPA_AF_RVU_LF_CFG_DEBUG());
+	} while (npa_lf_dbg.s.exec);
+
+	if (npa_lf_dbg.s.lf_valid)
+		*npaid = npa_lf_dbg.s.lf;
+	debug("%s: npa lf_valid %d lf %d npaid %d\n", __func__,
+	      npa_lf_dbg.s.lf_valid, npa_lf_dbg.s.lf, *npaid);
+}
+
+struct nix_af *rvu_af_init(struct rvu_af *rvu_af)
+{
+	struct nix_af *nix_af;
+	union rvu_af_addr_s block_addr;
+	int err;
+
+	nix_af = (struct nix_af *)calloc(1, sizeof(struct nix_af));
+	if (!nix_af) {
+		printf("%s: out of memory\n", __func__);
+		goto error;
+	}
+
+	nix_af->dev = rvu_af->dev;
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
+	nix_af->nix_af_base = rvu_af->af_base + block_addr.u;
+
+	nix_af->npa_af = (struct npa_af *)calloc(1, sizeof(struct npa_af));
+	if (!nix_af->npa_af) {
+		printf("%s: out of memory\n", __func__);
+		goto error;
+	}
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
+	nix_af->npa_af->npa_af_base = rvu_af->af_base + block_addr.u;
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
+	nix_af->npc_af_base = rvu_af->af_base + block_addr.u;
+
+	debug("%s: Setting up npa admin\n", __func__);
+	err = npa_af_setup(nix_af->npa_af);
+	if (err) {
+		printf("%s: Error %d setting up NPA admin\n", __func__, err);
+		goto error;
+	}
+	debug("%s: Setting up nix af\n", __func__);
+	err = nix_af_setup(nix_af);
+	if (err) {
+		printf("%s: Error %d setting up NIX admin\n", __func__, err);
+		goto error;
+	}
+	debug("%s: nix_af: %p\n", __func__, nix_af);
+	return nix_af;
+
+error:
+	/* nix_af may be NULL here if the first calloc failed */
+	if (nix_af) {
+		if (nix_af->npa_af)
+			free(nix_af->npa_af);
+		free(nix_af);
+	}
+	return NULL;
+}
+
+int rvu_af_probe(struct udevice *dev)
+{
+	struct rvu_af *af_ptr = dev_get_priv(dev);
+
+	af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+					 PCI_REGION_MEM);
+	debug("%s RVU AF BAR %p\n", __func__, af_ptr->af_base);
+	af_ptr->dev = dev;
+	rvu_af_dev = dev;
+
+	af_ptr->nix_af = rvu_af_init(af_ptr);
+	if (!af_ptr->nix_af) {
+		printf("%s: Error: could not initialize NIX AF\n", __func__);
+		return -1;
+	}
+	debug("%s: Done\n", __func__);
+
+	return 0;
+}
+
+int rvu_af_remove(struct udevice *dev)
+{
+	struct rvu_af *rvu_af = dev_get_priv(dev);
+
+	nix_af_shutdown(rvu_af->nix_af);
+	npa_af_shutdown(rvu_af->nix_af->npa_af);
+	npc_af_shutdown(rvu_af->nix_af);
+
+	debug("%s: rvu af down --\n", __func__);
+	return 0;
+}
+
+U_BOOT_DRIVER(rvu_af) = {
+	.name   = "rvu_af",
+	.id     = UCLASS_MISC,
+	.probe  = rvu_af_probe,
+	.remove = rvu_af_remove,
+	.priv_auto_alloc_size = sizeof(struct
rvu_af), +}; + +static struct pci_device_id rvu_af_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_AF) }, + {} +}; + +U_BOOT_PCI_DEVICE(rvu_af, rvu_af_supported); diff --git a/drivers/net/octeontx2/rvu_common.c b/drivers/net/octeontx2/rvu_common.c new file mode 100644 index 00000000000..173b28ba4bb --- /dev/null +++ b/drivers/net/octeontx2/rvu_common.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. + */ + +#include <dm.h> +#include <errno.h> +#include <malloc.h> +#include <misc.h> +#include <net.h> +#include <asm/io.h> + +#include "rvu.h" + +int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz) +{ + q->base = memalign(CONFIG_SYS_CACHELINE_SIZE, qsize * entry_sz); + if (!q->base) + return -ENOMEM; + q->entry_sz = entry_sz; + q->qsize = qsize; + q->alloc_sz = (size_t)qsize * entry_sz; + q->iova = (dma_addr_t)(q->base); + debug("NIX: qmem alloc for (%d * %d = %ld bytes) at %p\n", + q->qsize, q->entry_sz, q->alloc_sz, q->base); + return 0; +} + +void qmem_free(struct qmem *q) +{ + if (q->base) + free(q->base); + memset(q, 0, sizeof(*q)); +} + +/** + * Allocates an admin queue for instructions and results + * + * @param aq admin queue to allocate for + * @param qsize Number of entries in the queue + * @param inst_size Size of each instruction + * @param res_size Size of each result + * + * @return -ENOMEM on error, 0 on success + */ +int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize, + size_t inst_size, size_t res_size) +{ + int err; + + err = qmem_alloc(&aq->inst, qsize, inst_size); + if (err) + return err; + err = qmem_alloc(&aq->res, qsize, res_size); + if (err) + qmem_free(&aq->inst); + + return err; +} + +/** + * Frees an admin queue + * + * @param aq Admin queue to free + */ +void rvu_aq_free(struct admin_queue *aq) +{ + qmem_free(&aq->inst); + qmem_free(&aq->res); + memset(aq, 0, sizeof(*aq)); +} diff --git a/drivers/net/octeontx2/rvu_pf.c b/drivers/net/octeontx2/rvu_pf.c new file mode 100644 index 00000000000..201ecf2c168 --- /dev/null +++ b/drivers/net/octeontx2/rvu_pf.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/arch/board.h>
+#include "cgx.h"
+#include "nix.h"
+
+extern struct udevice *rvu_af_dev;
+
+int rvu_pf_init(struct rvu_pf *rvu)
+{
+	struct nix *nix;
+	struct eth_pdata *pdata = dev_get_platdata(rvu->dev);
+
+	debug("%s: Allocating nix lf\n", __func__);
+	nix = nix_lf_alloc(rvu->dev);
+	if (!nix) {
+		printf("%s: Error allocating lf for pf %d\n",
+		       __func__, rvu->pfid);
+		return -1;
+	}
+	rvu->nix = nix;
+
+	/* to make post_probe happy */
+	if (is_valid_ethaddr(nix->lmac->mac_addr)) {
+		memcpy(pdata->enetaddr, nix->lmac->mac_addr, 6);
+		eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
+					      pdata->enetaddr);
+	}
+
+	return 0;
+}
+
+static const struct eth_ops nix_eth_ops = {
+	.start			= nix_lf_init,
+	.send			= nix_lf_xmit,
+	.recv			= nix_lf_recv,
+	.free_pkt		= nix_lf_free_pkt,
+	.stop			= nix_lf_halt,
+	.write_hwaddr		= nix_lf_setup_mac,
+};
+
+int rvu_pf_probe(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	int err;
+	char name[16];
+
+	debug("%s: name: %s\n", __func__, dev->name);
+
+	rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
+	rvu->pfid = dev->seq + 1; /* RVU PFs start from 1 */
+	rvu->dev = dev;
+	if (!rvu_af_dev) {
+		printf("%s: Error: Could not find RVU AF device\n",
+		       __func__);
+		return -1;
+	}
+	rvu->afdev = rvu_af_dev;
+
+	debug("RVU PF %u BAR2 %p\n", rvu->pfid, rvu->pf_base);
+
+	rvu_get_lfid_for_pf(rvu->pfid, &rvu->nix_lfid, &rvu->npa_lfid);
+
+	err = rvu_pf_init(rvu);
+	if (err)
+		printf("%s: Error %d adding nix\n", __func__, err);
+
+	/*
+	 * modify device name to include index/sequence number,
+	 * for better readability, this is 1:1 mapping with eth0/1/2.. names.
+ */ + sprintf(name, "rvu_pf#%d", dev->seq); + device_set_name(dev, name); + debug("%s: name: %s\n", __func__, dev->name); + return err; +} + +int rvu_pf_remove(struct udevice *dev) +{ + struct rvu_pf *rvu = dev_get_priv(dev); + + nix_lf_shutdown(rvu->nix); + npa_lf_shutdown(rvu->nix); + + debug("%s: rvu pf%d down --\n", __func__, rvu->pfid); + + return 0; +} + +U_BOOT_DRIVER(rvu_pf) = { + .name = "rvu_pf", + .id = UCLASS_ETH, + .probe = rvu_pf_probe, + .remove = rvu_pf_remove, + .ops = &nix_eth_ops, + .priv_auto_alloc_size = sizeof(struct rvu_pf), + .platdata_auto_alloc_size = sizeof(struct eth_pdata), +}; + +static struct pci_device_id rvu_pf_supported[] = { + { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_PF) }, + {} +}; + +U_BOOT_PCI_DEVICE(rvu_pf, rvu_pf_supported); diff --git a/drivers/net/sun8i_emac.c b/drivers/net/sun8i_emac.c index 1dae81c7bf8..4524604126c 100644 --- a/drivers/net/sun8i_emac.c +++ b/drivers/net/sun8i_emac.c @@ -29,6 +29,7 @@ #include <net.h> #include <reset.h> #include <dt-bindings/pinctrl/sun4i-a10.h> +#include <wait_bit.h> #if CONFIG_IS_ENABLED(DM_GPIO) #include <asm-generic/gpio.h> #endif @@ -40,6 +41,11 @@ #define MDIO_CMD_MII_PHY_REG_ADDR_SHIFT 4 #define MDIO_CMD_MII_PHY_ADDR_MASK 0x0001f000 #define MDIO_CMD_MII_PHY_ADDR_SHIFT 12 +#define MDIO_CMD_MII_CLK_CSR_DIV_16 0x0 +#define MDIO_CMD_MII_CLK_CSR_DIV_32 0x1 +#define MDIO_CMD_MII_CLK_CSR_DIV_64 0x2 +#define MDIO_CMD_MII_CLK_CSR_DIV_128 0x3 +#define MDIO_CMD_MII_CLK_CSR_SHIFT 20 #define CONFIG_TX_DESCR_NUM 32 #define CONFIG_RX_DESCR_NUM 32 @@ -84,15 +90,32 @@ /* H3/A64 EMAC Register's offset */ #define EMAC_CTL0 0x00 +#define EMAC_CTL0_FULL_DUPLEX BIT(0) +#define EMAC_CTL0_SPEED_MASK GENMASK(3, 2) +#define EMAC_CTL0_SPEED_10 (0x2 << 2) +#define EMAC_CTL0_SPEED_100 (0x3 << 2) +#define EMAC_CTL0_SPEED_1000 (0x0 << 2) #define EMAC_CTL1 0x04 +#define EMAC_CTL1_SOFT_RST BIT(0) +#define EMAC_CTL1_BURST_LEN_SHIFT 24 #define EMAC_INT_STA 0x08 #define EMAC_INT_EN 0x0c #define EMAC_TX_CTL0 0x10 +#define EMAC_TX_CTL0_TX_EN BIT(31) #define EMAC_TX_CTL1 0x14 +#define EMAC_TX_CTL1_TX_MD BIT(1) +#define EMAC_TX_CTL1_TX_DMA_EN BIT(30) +#define EMAC_TX_CTL1_TX_DMA_START BIT(31) #define EMAC_TX_FLOW_CTL 0x1c #define EMAC_TX_DMA_DESC 0x20 #define EMAC_RX_CTL0 0x24 +#define EMAC_RX_CTL0_RX_EN BIT(31) #define EMAC_RX_CTL1 0x28 +#define EMAC_RX_CTL1_RX_MD BIT(1) +#define EMAC_RX_CTL1_RX_RUNT_FRM BIT(2) +#define EMAC_RX_CTL1_RX_ERR_FRM BIT(3) +#define EMAC_RX_CTL1_RX_DMA_EN BIT(30) +#define EMAC_RX_CTL1_RX_DMA_START BIT(31) #define EMAC_RX_DMA_DESC 0x34 #define EMAC_MII_CMD 0x48 #define EMAC_MII_DATA 0x4c @@ -104,6 +127,13 @@ #define EMAC_RX_DMA_STA 0xc0 #define EMAC_RX_CUR_DESC 0xc4 +#define EMAC_DESC_OWN_DMA BIT(31) +#define EMAC_DESC_LAST_DESC BIT(30) +#define EMAC_DESC_FIRST_DESC BIT(29) +#define EMAC_DESC_CHAIN_SECOND BIT(24) + +#define EMAC_DESC_RX_ERROR_MASK 0x400068db + DECLARE_GLOBAL_DATA_PTR; enum emac_variant { @@ -116,7 +146,7 @@ enum emac_variant { struct emac_dma_desc { u32 status; - u32 st; + u32 ctl_size; u32 buf_addr; u32 next; } __aligned(ARCH_DMA_MINALIGN); @@ -166,32 +196,31 @@ static int sun8i_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) { struct udevice *dev = bus->priv; struct emac_eth_dev *priv = dev_get_priv(dev); - ulong start; - u32 miiaddr = 0; - int timeout = CONFIG_MDIO_TIMEOUT; + u32 mii_cmd; + int ret; - miiaddr &= ~MDIO_CMD_MII_WRITE; - miiaddr &= ~MDIO_CMD_MII_PHY_REG_ADDR_MASK; - miiaddr |= (reg << MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) & + mii_cmd = (reg << 
MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) & MDIO_CMD_MII_PHY_REG_ADDR_MASK; - - miiaddr &= ~MDIO_CMD_MII_PHY_ADDR_MASK; - - miiaddr |= (addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) & + mii_cmd |= (addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) & MDIO_CMD_MII_PHY_ADDR_MASK; - miiaddr |= MDIO_CMD_MII_BUSY; + /* + * The EMAC clock is either 200 or 300 MHz, so we need a divider + * of 128 to get the MDIO frequency below the required 2.5 MHz. + */ + mii_cmd |= MDIO_CMD_MII_CLK_CSR_DIV_128 << MDIO_CMD_MII_CLK_CSR_SHIFT; - writel(miiaddr, priv->mac_reg + EMAC_MII_CMD); + mii_cmd |= MDIO_CMD_MII_BUSY; - start = get_timer(0); - while (get_timer(start) < timeout) { - if (!(readl(priv->mac_reg + EMAC_MII_CMD) & MDIO_CMD_MII_BUSY)) - return readl(priv->mac_reg + EMAC_MII_DATA); - udelay(10); - }; + writel(mii_cmd, priv->mac_reg + EMAC_MII_CMD); - return -1; + ret = wait_for_bit_le32(priv->mac_reg + EMAC_MII_CMD, + MDIO_CMD_MII_BUSY, false, + CONFIG_MDIO_TIMEOUT, true); + if (ret < 0) + return ret; + + return readl(priv->mac_reg + EMAC_MII_DATA); } static int sun8i_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, @@ -199,39 +228,35 @@ static int sun8i_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, { struct udevice *dev = bus->priv; struct emac_eth_dev *priv = dev_get_priv(dev); - ulong start; - u32 miiaddr = 0; - int ret = -1, timeout = CONFIG_MDIO_TIMEOUT; + u32 mii_cmd; - miiaddr &= ~MDIO_CMD_MII_PHY_REG_ADDR_MASK; - miiaddr |= (reg << MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) & + mii_cmd = (reg << MDIO_CMD_MII_PHY_REG_ADDR_SHIFT) & MDIO_CMD_MII_PHY_REG_ADDR_MASK; - - miiaddr &= ~MDIO_CMD_MII_PHY_ADDR_MASK; - miiaddr |= (addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) & + mii_cmd |= (addr << MDIO_CMD_MII_PHY_ADDR_SHIFT) & MDIO_CMD_MII_PHY_ADDR_MASK; - miiaddr |= MDIO_CMD_MII_WRITE; - miiaddr |= MDIO_CMD_MII_BUSY; + /* + * The EMAC clock is either 200 or 300 MHz, so we need a divider + * of 128 to get the MDIO frequency below the required 2.5 MHz. 
+ */ + mii_cmd |= MDIO_CMD_MII_CLK_CSR_DIV_128 << MDIO_CMD_MII_CLK_CSR_SHIFT; - writel(val, priv->mac_reg + EMAC_MII_DATA); - writel(miiaddr, priv->mac_reg + EMAC_MII_CMD); + mii_cmd |= MDIO_CMD_MII_WRITE; + mii_cmd |= MDIO_CMD_MII_BUSY; - start = get_timer(0); - while (get_timer(start) < timeout) { - if (!(readl(priv->mac_reg + EMAC_MII_CMD) & - MDIO_CMD_MII_BUSY)) { - ret = 0; - break; - } - udelay(10); - }; + writel(val, priv->mac_reg + EMAC_MII_DATA); + writel(mii_cmd, priv->mac_reg + EMAC_MII_CMD); - return ret; + return wait_for_bit_le32(priv->mac_reg + EMAC_MII_CMD, + MDIO_CMD_MII_BUSY, false, + CONFIG_MDIO_TIMEOUT, true); } -static int _sun8i_write_hwaddr(struct emac_eth_dev *priv, u8 *mac_id) +static int sun8i_eth_write_hwaddr(struct udevice *dev) { + struct emac_eth_dev *priv = dev_get_priv(dev); + struct eth_pdata *pdata = dev_get_platdata(dev); + uchar *mac_id = pdata->enetaddr; u32 macid_lo, macid_hi; macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) + @@ -252,21 +277,21 @@ static void sun8i_adjust_link(struct emac_eth_dev *priv, v = readl(priv->mac_reg + EMAC_CTL0); if (phydev->duplex) - v |= BIT(0); + v |= EMAC_CTL0_FULL_DUPLEX; else - v &= ~BIT(0); + v &= ~EMAC_CTL0_FULL_DUPLEX; - v &= ~0x0C; + v &= ~EMAC_CTL0_SPEED_MASK; switch (phydev->speed) { case 1000: + v |= EMAC_CTL0_SPEED_1000; break; case 100: - v |= BIT(2); - v |= BIT(3); + v |= EMAC_CTL0_SPEED_100; break; case 10: - v |= BIT(3); + v |= EMAC_CTL0_SPEED_10; break; } writel(v, priv->mac_reg + EMAC_CTL0); @@ -372,24 +397,36 @@ static int sun8i_phy_init(struct emac_eth_dev *priv, void *dev) return 0; } +#define cache_clean_descriptor(desc) \ + flush_dcache_range((uintptr_t)(desc), \ + (uintptr_t)(desc) + sizeof(struct emac_dma_desc)) + +#define cache_inv_descriptor(desc) \ + invalidate_dcache_range((uintptr_t)(desc), \ + (uintptr_t)(desc) + sizeof(struct emac_dma_desc)) + static void rx_descs_init(struct emac_eth_dev *priv) { struct emac_dma_desc *desc_table_p = &priv->rx_chain[0]; char *rxbuffs = &priv->rxbuffer[0]; struct emac_dma_desc *desc_p; - u32 idx; - - /* flush Rx buffers */ - flush_dcache_range((uintptr_t)rxbuffs, (ulong)rxbuffs + - RX_TOTAL_BUFSIZE); - - for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) { - desc_p = &desc_table_p[idx]; - desc_p->buf_addr = (uintptr_t)&rxbuffs[idx * CONFIG_ETH_BUFSIZE] - ; - desc_p->next = (uintptr_t)&desc_table_p[idx + 1]; - desc_p->st |= CONFIG_ETH_RXSIZE; - desc_p->status = BIT(31); + int i; + + /* + * Make sure we don't have dirty cache lines around, which could + * be cleaned to DRAM *after* the MAC has already written data to it. 
+ */ + invalidate_dcache_range((uintptr_t)desc_table_p, + (uintptr_t)desc_table_p + sizeof(priv->rx_chain)); + invalidate_dcache_range((uintptr_t)rxbuffs, + (uintptr_t)rxbuffs + sizeof(priv->rxbuffer)); + + for (i = 0; i < CONFIG_RX_DESCR_NUM; i++) { + desc_p = &desc_table_p[i]; + desc_p->buf_addr = (uintptr_t)&rxbuffs[i * CONFIG_ETH_BUFSIZE]; + desc_p->next = (uintptr_t)&desc_table_p[i + 1]; + desc_p->ctl_size = CONFIG_ETH_RXSIZE; + desc_p->status = EMAC_DESC_OWN_DMA; } /* Correcting the last pointer of the chain */ @@ -408,87 +445,74 @@ static void tx_descs_init(struct emac_eth_dev *priv) struct emac_dma_desc *desc_table_p = &priv->tx_chain[0]; char *txbuffs = &priv->txbuffer[0]; struct emac_dma_desc *desc_p; - u32 idx; - - for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) { - desc_p = &desc_table_p[idx]; - desc_p->buf_addr = (uintptr_t)&txbuffs[idx * CONFIG_ETH_BUFSIZE] - ; - desc_p->next = (uintptr_t)&desc_table_p[idx + 1]; - desc_p->status = (1 << 31); - desc_p->st = 0; + int i; + + for (i = 0; i < CONFIG_TX_DESCR_NUM; i++) { + desc_p = &desc_table_p[i]; + desc_p->buf_addr = (uintptr_t)&txbuffs[i * CONFIG_ETH_BUFSIZE]; + desc_p->next = (uintptr_t)&desc_table_p[i + 1]; + desc_p->ctl_size = 0; + desc_p->status = 0; } /* Correcting the last pointer of the chain */ desc_p->next = (uintptr_t)&desc_table_p[0]; - /* Flush all Tx buffer descriptors */ - flush_dcache_range((uintptr_t)priv->tx_chain, - (uintptr_t)priv->tx_chain + - sizeof(priv->tx_chain)); + /* Flush the first TX buffer descriptor we will tell the MAC about. */ + cache_clean_descriptor(desc_table_p); writel((uintptr_t)&desc_table_p[0], priv->mac_reg + EMAC_TX_DMA_DESC); priv->tx_currdescnum = 0; } -static int _sun8i_emac_eth_init(struct emac_eth_dev *priv, u8 *enetaddr) +static int sun8i_emac_eth_start(struct udevice *dev) { - u32 reg, v; - int timeout = 100; - - reg = readl((priv->mac_reg + EMAC_CTL1)); - - if (!(reg & 0x1)) { - /* Soft reset MAC */ - setbits_le32((priv->mac_reg + EMAC_CTL1), 0x1); - do { - reg = readl(priv->mac_reg + EMAC_CTL1); - } while ((reg & 0x01) != 0 && (--timeout)); - if (!timeout) { - printf("%s: Timeout\n", __func__); - return -1; - } + struct emac_eth_dev *priv = dev_get_priv(dev); + int ret; + + /* Soft reset MAC */ + writel(EMAC_CTL1_SOFT_RST, priv->mac_reg + EMAC_CTL1); + ret = wait_for_bit_le32(priv->mac_reg + EMAC_CTL1, + EMAC_CTL1_SOFT_RST, false, 10, true); + if (ret) { + printf("%s: Timeout\n", __func__); + return ret; } /* Rewrite mac address after reset */ - _sun8i_write_hwaddr(priv, enetaddr); + sun8i_eth_write_hwaddr(dev); - v = readl(priv->mac_reg + EMAC_TX_CTL1); - /* TX_MD Transmission starts after a full frame located in TX DMA FIFO*/ - v |= BIT(1); - writel(v, priv->mac_reg + EMAC_TX_CTL1); + /* transmission starts after the full frame arrived in TX DMA FIFO */ + setbits_le32(priv->mac_reg + EMAC_TX_CTL1, EMAC_TX_CTL1_TX_MD); - v = readl(priv->mac_reg + EMAC_RX_CTL1); - /* RX_MD RX DMA reads data from RX DMA FIFO to host memory after a + /* + * RX DMA reads data from RX DMA FIFO to host memory after a * complete frame has been written to RX DMA FIFO */ - v |= BIT(1); - writel(v, priv->mac_reg + EMAC_RX_CTL1); + setbits_le32(priv->mac_reg + EMAC_RX_CTL1, EMAC_RX_CTL1_RX_MD); - /* DMA */ - writel(8 << 24, priv->mac_reg + EMAC_CTL1); + /* DMA burst length */ + writel(8 << EMAC_CTL1_BURST_LEN_SHIFT, priv->mac_reg + EMAC_CTL1); /* Initialize rx/tx descriptors */ rx_descs_init(priv); tx_descs_init(priv); /* PHY Start Up */ - phy_startup(priv->phydev); + ret = phy_startup(priv->phydev); + 
if (ret) + return ret; sun8i_adjust_link(priv, priv->phydev); - /* Start RX DMA */ - v = readl(priv->mac_reg + EMAC_RX_CTL1); - v |= BIT(30); - writel(v, priv->mac_reg + EMAC_RX_CTL1); - /* Start TX DMA */ - v = readl(priv->mac_reg + EMAC_TX_CTL1); - v |= BIT(30); - writel(v, priv->mac_reg + EMAC_TX_CTL1); + /* Start RX/TX DMA */ + setbits_le32(priv->mac_reg + EMAC_RX_CTL1, EMAC_RX_CTL1_RX_DMA_EN | + EMAC_RX_CTL1_RX_ERR_FRM | EMAC_RX_CTL1_RX_RUNT_FRM); + setbits_le32(priv->mac_reg + EMAC_TX_CTL1, EMAC_TX_CTL1_TX_DMA_EN); /* Enable RX/TX */ - setbits_le32(priv->mac_reg + EMAC_RX_CTL0, BIT(31)); - setbits_le32(priv->mac_reg + EMAC_TX_CTL0, BIT(31)); + setbits_le32(priv->mac_reg + EMAC_RX_CTL0, EMAC_RX_CTL0_RX_EN); + setbits_le32(priv->mac_reg + EMAC_TX_CTL0, EMAC_TX_CTL0_TX_EN); return 0; } @@ -558,88 +582,71 @@ static int parse_phy_pins(struct udevice *dev) return 0; } -static int _sun8i_eth_recv(struct emac_eth_dev *priv, uchar **packetp) +static int sun8i_emac_eth_recv(struct udevice *dev, int flags, uchar **packetp) { + struct emac_eth_dev *priv = dev_get_priv(dev); u32 status, desc_num = priv->rx_currdescnum; struct emac_dma_desc *desc_p = &priv->rx_chain[desc_num]; - int length = -EAGAIN; - int good_packet = 1; - uintptr_t desc_start = (uintptr_t)desc_p; - uintptr_t desc_end = desc_start + - roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN); - - ulong data_start = (uintptr_t)desc_p->buf_addr; - ulong data_end; + uintptr_t data_start = (uintptr_t)desc_p->buf_addr; + int length; /* Invalidate entire buffer descriptor */ - invalidate_dcache_range(desc_start, desc_end); + cache_inv_descriptor(desc_p); status = desc_p->status; /* Check for DMA own bit */ - if (!(status & BIT(31))) { - length = (desc_p->status >> 16) & 0x3FFF; + if (status & EMAC_DESC_OWN_DMA) + return -EAGAIN; - if (length < 0x40) { - good_packet = 0; - debug("RX: Bad Packet (runt)\n"); - } + length = (status >> 16) & 0x3fff; - data_end = data_start + length; - /* Invalidate received data */ - invalidate_dcache_range(rounddown(data_start, - ARCH_DMA_MINALIGN), - roundup(data_end, - ARCH_DMA_MINALIGN)); - if (good_packet) { - if (length > CONFIG_ETH_RXSIZE) { - printf("Received packet is too big (len=%d)\n", - length); - return -EMSGSIZE; - } - *packetp = (uchar *)(ulong)desc_p->buf_addr; - return length; - } + /* make sure we read from DRAM, not our cache */ + invalidate_dcache_range(data_start, + data_start + roundup(length, ARCH_DMA_MINALIGN)); + + if (status & EMAC_DESC_RX_ERROR_MASK) { + debug("RX: packet error: 0x%x\n", + status & EMAC_DESC_RX_ERROR_MASK); + return 0; + } + if (length < 0x40) { + debug("RX: Bad Packet (runt)\n"); + return 0; } + if (length > CONFIG_ETH_RXSIZE) { + debug("RX: Too large packet (%d bytes)\n", length); + return 0; + } + + *packetp = (uchar *)(ulong)desc_p->buf_addr; + return length; } -static int _sun8i_emac_eth_send(struct emac_eth_dev *priv, void *packet, - int len) +static int sun8i_emac_eth_send(struct udevice *dev, void *packet, int length) { - u32 v, desc_num = priv->tx_currdescnum; + struct emac_eth_dev *priv = dev_get_priv(dev); + u32 desc_num = priv->tx_currdescnum; struct emac_dma_desc *desc_p = &priv->tx_chain[desc_num]; - uintptr_t desc_start = (uintptr_t)desc_p; - uintptr_t desc_end = desc_start + - roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN); - uintptr_t data_start = (uintptr_t)desc_p->buf_addr; uintptr_t data_end = data_start + - roundup(len, ARCH_DMA_MINALIGN); - - /* Invalidate entire buffer descriptor */ - invalidate_dcache_range(desc_start, desc_end); + roundup(length, 
ARCH_DMA_MINALIGN);
 
-	desc_p->st = len;
-	/* Mandatory undocumented bit */
-	desc_p->st |= BIT(24);
+	desc_p->ctl_size = length | EMAC_DESC_CHAIN_SECOND;
 
-	memcpy((void *)data_start, packet, len);
+	memcpy((void *)data_start, packet, length);
 
 	/* Flush data to be sent */
 	flush_dcache_range(data_start, data_end);
 
-	/* frame end */
-	desc_p->st |= BIT(30);
-	desc_p->st |= BIT(31);
+	/* frame begin and end */
+	desc_p->ctl_size |= EMAC_DESC_LAST_DESC | EMAC_DESC_FIRST_DESC;
+	desc_p->status = EMAC_DESC_OWN_DMA;
 
-	/*frame begin */
-	desc_p->st |= BIT(29);
-	desc_p->status = BIT(31);
-
-	/*Descriptors st and status field has changed, so FLUSH it */
-	flush_dcache_range(desc_start, desc_end);
+	/* make sure the MAC reads the actual data from DRAM */
+	cache_clean_descriptor(desc_p);
 
 	/* Move to next Descriptor and wrap around */
 	if (++desc_num >= CONFIG_TX_DESCR_NUM)
@@ -647,20 +654,14 @@ static int _sun8i_emac_eth_send(struct emac_eth_dev *priv, void *packet,
 	priv->tx_currdescnum = desc_num;
 
 	/* Start the DMA */
-	v = readl(priv->mac_reg + EMAC_TX_CTL1);
-	v |= BIT(31);/* mandatory */
-	v |= BIT(30);/* mandatory */
-	writel(v, priv->mac_reg + EMAC_TX_CTL1);
-
-	return 0;
-}
+	setbits_le32(priv->mac_reg + EMAC_TX_CTL1, EMAC_TX_CTL1_TX_DMA_START);
 
-static int sun8i_eth_write_hwaddr(struct udevice *dev)
-{
-	struct eth_pdata *pdata = dev_get_platdata(dev);
-	struct emac_eth_dev *priv = dev_get_priv(dev);
+	/*
+	 * Since we copied the data above, we return here without waiting
+	 * for the packet to be actually sent out.
+	 */
 
-	return _sun8i_write_hwaddr(priv, pdata->enetaddr);
+	return 0;
 }
 
 static int sun8i_emac_board_setup(struct udevice *dev,
@@ -760,40 +761,18 @@ static int sun8i_mdio_init(const char *name, struct udevice *priv)
 	return mdio_register(bus);
 }
 
-static int sun8i_emac_eth_start(struct udevice *dev)
-{
-	struct eth_pdata *pdata = dev_get_platdata(dev);
-
-	return _sun8i_emac_eth_init(dev->priv, pdata->enetaddr);
-}
-
-static int sun8i_emac_eth_send(struct udevice *dev, void *packet, int length)
-{
-	struct emac_eth_dev *priv = dev_get_priv(dev);
-
-	return _sun8i_emac_eth_send(priv, packet, length);
-}
-
-static int sun8i_emac_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+static int sun8i_eth_free_pkt(struct udevice *dev, uchar *packet,
+			      int length)
 {
 	struct emac_eth_dev *priv = dev_get_priv(dev);
-
-	return _sun8i_eth_recv(priv, packetp);
-}
-
-static int _sun8i_free_pkt(struct emac_eth_dev *priv)
-{
 	u32 desc_num = priv->rx_currdescnum;
 	struct emac_dma_desc *desc_p = &priv->rx_chain[desc_num];
-	uintptr_t desc_start = (uintptr_t)desc_p;
-	uintptr_t desc_end = desc_start +
-		roundup(sizeof(u32), ARCH_DMA_MINALIGN);
 
-	/* Make the current descriptor valid again */
-	desc_p->status |= BIT(31);
+	/* give the current descriptor back to the MAC */
+	desc_p->status |= EMAC_DESC_OWN_DMA;
 
 	/* Flush Status field of descriptor */
-	flush_dcache_range(desc_start, desc_end);
+	cache_clean_descriptor(desc_p);
 
 	/* Move to next desc and wrap-around condition.
*/ if (++desc_num >= CONFIG_RX_DESCR_NUM) @@ -803,24 +782,17 @@ static int _sun8i_free_pkt(struct emac_eth_dev *priv) return 0; } -static int sun8i_eth_free_pkt(struct udevice *dev, uchar *packet, - int length) -{ - struct emac_eth_dev *priv = dev_get_priv(dev); - - return _sun8i_free_pkt(priv); -} - static void sun8i_emac_eth_stop(struct udevice *dev) { struct emac_eth_dev *priv = dev_get_priv(dev); /* Stop Rx/Tx transmitter */ - clrbits_le32(priv->mac_reg + EMAC_RX_CTL0, BIT(31)); - clrbits_le32(priv->mac_reg + EMAC_TX_CTL0, BIT(31)); + clrbits_le32(priv->mac_reg + EMAC_RX_CTL0, EMAC_RX_CTL0_RX_EN); + clrbits_le32(priv->mac_reg + EMAC_TX_CTL0, EMAC_TX_CTL0_TX_EN); - /* Stop TX DMA */ - clrbits_le32(priv->mac_reg + EMAC_TX_CTL1, BIT(30)); + /* Stop RX/TX DMA */ + clrbits_le32(priv->mac_reg + EMAC_TX_CTL1, EMAC_TX_CTL1_TX_DMA_EN); + clrbits_le32(priv->mac_reg + EMAC_RX_CTL1, EMAC_RX_CTL1_RX_DMA_EN); phy_shutdown(priv->phydev); } @@ -855,47 +827,30 @@ static const struct eth_ops sun8i_emac_eth_ops = { .stop = sun8i_emac_eth_stop, }; -static int sun8i_get_ephy_nodes(struct udevice *dev, struct emac_eth_dev *priv) +static int sun8i_handle_internal_phy(struct udevice *dev, struct emac_eth_dev *priv) { - int emac_node, ephy_node, ret, ephy_handle; + struct ofnode_phandle_args phandle; + int ret; - emac_node = fdt_path_offset(gd->fdt_blob, - "/soc/ethernet@1c30000"); - if (emac_node < 0) { - debug("failed to get emac node\n"); - return emac_node; - } - ephy_handle = fdtdec_lookup_phandle(gd->fdt_blob, - emac_node, "phy-handle"); - - /* look for mdio-mux node for internal PHY node */ - ephy_node = fdt_path_offset(gd->fdt_blob, - "/soc/ethernet@1c30000/mdio-mux/mdio@1/ethernet-phy@1"); - if (ephy_node < 0) { - debug("failed to get mdio-mux with internal PHY\n"); - return ephy_node; - } + ret = ofnode_parse_phandle_with_args(dev_ofnode(dev), "phy-handle", + NULL, 0, 0, &phandle); + if (ret) + return ret; - /* This is not the phy we are looking for */ - if (ephy_node != ephy_handle) + /* If the PHY node is not a child of the internal MDIO bus, we are + * using some external PHY. + */ + if (!ofnode_device_is_compatible(ofnode_get_parent(phandle.node), + "allwinner,sun8i-h3-mdio-internal")) return 0; - ret = fdt_node_check_compatible(gd->fdt_blob, ephy_node, - "allwinner,sun8i-h3-mdio-internal"); - if (ret < 0) { - debug("failed to find mdio-internal node\n"); - return ret; - } - - ret = clk_get_by_index_nodev(offset_to_ofnode(ephy_node), 0, - &priv->ephy_clk); + ret = clk_get_by_index_nodev(phandle.node, 0, &priv->ephy_clk); if (ret) { dev_err(dev, "failed to get EPHY TX clock\n"); return ret; } - ret = reset_get_by_index_nodev(offset_to_ofnode(ephy_node), 0, - &priv->ephy_rst); + ret = reset_get_by_index_nodev(phandle.node, 0, &priv->ephy_rst); if (ret) { dev_err(dev, "failed to get EPHY TX reset\n"); return ret; @@ -987,7 +942,7 @@ static int sun8i_emac_eth_ofdata_to_platdata(struct udevice *dev) } if (priv->variant == H3_EMAC) { - ret = sun8i_get_ephy_nodes(dev, priv); + ret = sun8i_handle_internal_phy(dev, priv); if (ret) return ret; } diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index dd1cc652290..af927849508 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -179,6 +179,18 @@ config PCIE_LAYERSCAPE_RC configured to Root Complex mode by clearing the corresponding bit of RCW[HOST_AGT_PEX]. 
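The PCI_IOMMU_EXTRA_MAPPINGS option added just below is driven by a config
string. Judging only from the parser introduced later in this patch (the
authoritative syntax lives in the README the help text points to), an
env-based setting might look like the following, where the base address
0x3400000 is purely a hypothetical controller register base:

	=> setenv pci_iommu_extra pci@0x3400000,2,0,0,hp,3,0,0,vfs=2

Each per-controller segment starts with "pci@<base>", followed by
<bus>,<dev>,<fn>,<action> entries, where the action is "hp" (reserve a
mapping for a hot-plugged device) or "vfs=<n>"/"noari_vfs=<n>" (pre-create
mappings for n SR-IOV VFs, the latter with ARI ignored).
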
+config PCI_IOMMU_EXTRA_MAPPINGS + bool "Support for specifying extra IOMMU mappings for PCI" + depends on PCIE_LAYERSCAPE_RC + help + Enable support for specifying extra IOMMU mappings for PCI + controllers through a special env var called "pci_iommu_extra" or + through a device tree property named "pci-iommu-extra" placed in + the node describing the PCI controller. + The intent is to cover SR-IOV scenarios which need mappings for VFs + and PCI hot-plug scenarios. More documentation can be found under: + arch/arm/cpu/armv8/fsl-layerscape/doc/README.pci_iommu_extra + config PCIE_LAYERSCAPE_EP bool "Layerscape PCIe Endpoint mode support" depends on DM_PCI diff --git a/drivers/pci/pcie_fsl.c b/drivers/pci/pcie_fsl.c index ab33459e28e..fb50b8f5180 100644 --- a/drivers/pci/pcie_fsl.c +++ b/drivers/pci/pcie_fsl.c @@ -396,6 +396,19 @@ static int fsl_pcie_init_atmu(struct fsl_pcie *pcie) return 0; } +static void fsl_pcie_dbi_read_only_reg_write_enable(struct fsl_pcie *pcie, + bool enable) +{ + u32 val; + + fsl_pcie_hose_read_config_dword(pcie, DBI_RO_WR_EN, &val); + if (enable) + val |= 1; + else + val &= ~1; + fsl_pcie_hose_write_config_dword(pcie, DBI_RO_WR_EN, val); +} + static int fsl_pcie_init_port(struct fsl_pcie *pcie) { ccsr_fsl_pci_t *regs = pcie->regs; @@ -470,7 +483,7 @@ static int fsl_pcie_init_port(struct fsl_pcie *pcie) * Set to 0 to protect the read-only registers. */ #ifdef CONFIG_SYS_FSL_ERRATUM_A007815 - clrbits_be32(®s->dbi_ro_wr_en, 0x01); + fsl_pcie_dbi_read_only_reg_write_enable(pcie, false); #endif /* @@ -504,13 +517,12 @@ static int fsl_pcie_init_port(struct fsl_pcie *pcie) static int fsl_pcie_fixup_classcode(struct fsl_pcie *pcie) { - ccsr_fsl_pci_t *regs = pcie->regs; u32 classcode_reg; u32 val; if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) { classcode_reg = PCI_CLASS_REVISION; - setbits_be32(®s->dbi_ro_wr_en, 0x01); + fsl_pcie_dbi_read_only_reg_write_enable(pcie, true); } else { classcode_reg = CSR_CLASSCODE; } @@ -521,7 +533,7 @@ static int fsl_pcie_fixup_classcode(struct fsl_pcie *pcie) fsl_pcie_hose_write_config_dword(pcie, classcode_reg, val); if (pcie->block_rev >= PEX_IP_BLK_REV_3_0) - clrbits_be32(®s->dbi_ro_wr_en, 0x01); + fsl_pcie_dbi_read_only_reg_write_enable(pcie, false); return 0; } diff --git a/drivers/pci/pcie_fsl.h b/drivers/pci/pcie_fsl.h index dc8368d5592..70c5f4e4cff 100644 --- a/drivers/pci/pcie_fsl.h +++ b/drivers/pci/pcie_fsl.h @@ -26,6 +26,8 @@ /* PCIe Link Status Register */ #define PCI_LSR (FSL_PCIE_CAP_ID + 0x12) +#define DBI_RO_WR_EN 0x8bc + #ifndef CONFIG_SYS_PCI_MEMORY_BUS #define CONFIG_SYS_PCI_MEMORY_BUS 0 #endif diff --git a/drivers/pci/pcie_layerscape_fixup.c b/drivers/pci/pcie_layerscape_fixup.c index 1709cd3d230..c75cf26e0a5 100644 --- a/drivers/pci/pcie_layerscape_fixup.c +++ b/drivers/pci/pcie_layerscape_fixup.c @@ -19,9 +19,39 @@ #ifdef CONFIG_ARM #include <asm/arch/clock.h> #endif +#include <malloc.h> +#include <env.h> #include "pcie_layerscape.h" #include "pcie_layerscape_fixup_common.h" +static int fdt_pcie_get_nodeoffset(void *blob, struct ls_pcie_rc *pcie_rc) +{ + int nodeoffset; + uint svr; + char *compat = NULL; + + /* find pci controller node */ + nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie", + pcie_rc->dbi_res.start); + if (nodeoffset < 0) { +#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */ + svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; + if (svr == SVR_LS2088A || svr == SVR_LS2084A || + svr == SVR_LS2048A || svr == SVR_LS2044A || + svr == SVR_LS2081A || svr == SVR_LS2041A) 
+ compat = "fsl,ls2088a-pcie"; + else + compat = CONFIG_FSL_PCIE_COMPAT; + + nodeoffset = + fdt_node_offset_by_compat_reg(blob, compat, + pcie_rc->dbi_res.start); +#endif + } + + return nodeoffset; +} + #if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2) /* * Return next available LUT index. @@ -127,30 +157,11 @@ static void fdt_pcie_set_iommu_map_entry_ls(void *blob, u32 iommu_map[4]; int nodeoffset; int lenp; - uint svr; - char *compat = NULL; struct ls_pcie *pcie = pcie_rc->pcie; - /* find pci controller node */ - nodeoffset = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie", - pcie_rc->dbi_res.start); - if (nodeoffset < 0) { -#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */ - svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; - if (svr == SVR_LS2088A || svr == SVR_LS2084A || - svr == SVR_LS2048A || svr == SVR_LS2044A || - svr == SVR_LS2081A || svr == SVR_LS2041A) - compat = "fsl,ls2088a-pcie"; - else - compat = CONFIG_FSL_PCIE_COMPAT; - - if (compat) - nodeoffset = fdt_node_offset_by_compat_reg(blob, - compat, pcie_rc->dbi_res.start); -#endif - if (nodeoffset < 0) - return; - } + nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (nodeoffset < 0) + return; /* get phandle to iommu controller */ prop = fdt_getprop_w(blob, nodeoffset, "iommu-map", &lenp); @@ -174,13 +185,323 @@ static void fdt_pcie_set_iommu_map_entry_ls(void *blob, } } +static int fdt_fixup_pcie_device_ls(void *blob, pci_dev_t bdf, + struct ls_pcie_rc *pcie_rc) +{ + int streamid, index; + + streamid = pcie_next_streamid(pcie_rc->stream_id_cur, + pcie_rc->pcie->idx); + if (streamid < 0) { + printf("ERROR: out of stream ids for BDF %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + return -ENOENT; + } + pcie_rc->stream_id_cur++; + + index = ls_pcie_next_lut_index(pcie_rc); + if (index < 0) { + printf("ERROR: out of LUT indexes for BDF %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf)); + return -ENOENT; + } + + /* map PCI b.d.f to streamID in LUT */ + ls_pcie_lut_set_mapping(pcie_rc, index, bdf >> 8, streamid); + /* update msi-map in device tree */ + fdt_pcie_set_msi_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid); + /* update iommu-map in device tree */ + fdt_pcie_set_iommu_map_entry_ls(blob, pcie_rc, bdf >> 8, streamid); + + return 0; +} + +struct extra_iommu_entry { + int action; + pci_dev_t bdf; + int num_vfs; + bool noari; +}; + +#define EXTRA_IOMMU_ENTRY_HOTPLUG 1 +#define EXTRA_IOMMU_ENTRY_VFS 2 + +static struct extra_iommu_entry *get_extra_iommu_ents(void *blob, + int nodeoffset, + phys_addr_t addr, + int *cnt) +{ + const char *s, *p, *tok; + struct extra_iommu_entry *entries; + int i = 0, b, d, f; + + /* + * Retrieve extra IOMMU configuration from env var or from device tree. + * Env var is given priority. + */ + s = env_get("pci_iommu_extra"); + if (!s) { + s = fdt_getprop(blob, nodeoffset, "pci-iommu-extra", NULL); + } else { + phys_addr_t pci_base; + char *endp; + + /* + * In env var case the config string has "pci@0x..." in + * addition. Parse this part and match it by address against + * the input pci controller's registers base address. 
+ */
+ tok = s;
+ p = strchrnul(s + 1, ',');
+ s = NULL;
+ do {
+ if (!strncmp(tok, "pci", 3)) {
+ pci_base = simple_strtoul(tok + 4, &endp, 0);
+ if (pci_base == addr) {
+ s = endp + 1;
+ break;
+ }
+ }
+ p = strchrnul(p + 1, ',');
+ tok = p + 1;
+ } while (*p);
+ }
+
+ /*
+ * If no env var or device tree property was found, or the PCI
+ * register base address did not match, bail out
+ */
+ if (!s)
+ return NULL;
+
+ /*
+ * In order to find how many action entries to allocate, count the
+ * number of actions by iterating through the pairs of BDFs and
+ * actions.
+ */
+ *cnt = 0;
+ p = s;
+ while (*p && strncmp(p, "pci", 3)) {
+ if (*p == ',')
+ (*cnt)++;
+ p++;
+ }
+ if (!(*p))
+ (*cnt)++;
+
+ if (!(*cnt) || (*cnt) % 2) {
+ printf("ERROR: invalid or odd extra iommu token count %d\n",
+ *cnt);
+ return NULL;
+ }
+ *cnt = (*cnt) / 2;
+
+ entries = malloc((*cnt) * sizeof(*entries));
+ if (!entries) {
+ printf("ERROR: failed to allocate extra iommu entries\n");
+ return NULL;
+ }
+
+ /*
+ * Parse action entries one by one and store the information in the
+ * newly allocated actions array.
+ */
+ p = s;
+ while (p) {
+ /* Extract BDF */
+ b = simple_strtoul(p, (char **)&p, 0); p++;
+ d = simple_strtoul(p, (char **)&p, 0); p++;
+ f = simple_strtoul(p, (char **)&p, 0); p++;
+ entries[i].bdf = PCI_BDF(b, d, f);
+
+ /* Parse action */
+ if (!strncmp(p, "hp", 2)) {
+ /* Hot-plug entry */
+ entries[i].action = EXTRA_IOMMU_ENTRY_HOTPLUG;
+ p += 2;
+ } else if (!strncmp(p, "vfs", 3) ||
+ !strncmp(p, "noari_vfs", 9)) {
+ /* VFs or VFs with ARI disabled entry */
+ entries[i].action = EXTRA_IOMMU_ENTRY_VFS;
+ entries[i].noari = !strncmp(p, "noari_vfs", 9);
+
+ /*
+ * Parse and store total number of VFs to allocate
+ * IOMMU entries for.
+ */
+ p = strchr(p, '=');
+ entries[i].num_vfs = simple_strtoul(p + 1, (char **)&p,
+ 0);
+ if (*p)
+ p++;
+ } else {
+ printf("ERROR: invalid action in extra iommu entry\n");
+ free(entries);
+
+ return NULL;
+ }
+
+ if (!(*p) || !strncmp(p, "pci", 3))
+ break;
+
+ i++;
+ }
+
+ return entries;
+}
+
+static void get_vf_offset_and_stride(struct udevice *dev, int sriov_pos,
+ struct extra_iommu_entry *entry,
+ u16 *offset, u16 *stride)
+{
+ u16 tmp16;
+ u32 tmp32;
+ bool have_ari = false;
+ int pos;
+ struct udevice *pf_dev;
+
+ dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_TOTAL_VF, &tmp16);
+ if (entry->num_vfs > tmp16) {
+ printf("WARN: requested no. of VFs %d exceeds total of %d\n",
+ entry->num_vfs, tmp16);
+ }
+
+ /*
+ * The code below implements the VF Discovery recommendations specified
+ * in PCIe base spec "9.2.1.2 VF Discovery", quoted below:
+ *
+ * VF Discovery
+ *
+ * The First VF Offset and VF Stride fields in the SR-IOV extended
+ * capability are 16-bit Routing ID offsets. These offsets are used to
+ * compute the Routing IDs for the VFs with the following restrictions:
+ * - The value in NumVFs in a PF (Section 9.3.3.7) may affect the
+ * values in First VF Offset (Section 9.3.3.9) and VF Stride
+ * (Section 9.3.3.10) of that PF.
+ * - The value in ARI Capable Hierarchy (Section 9.3.3.3.5) in the
+ * lowest-numbered PF of the Device (for example PF0) may affect
+ * the values in First VF Offset and VF Stride in all PFs of the
+ * Device.
+ * - NumVFs of a PF may only be changed when VF Enable
+ * (Section 9.3.3.3.1) of that PF is Clear.
+ * - ARI Capable Hierarchy (Section 9.3.3.3.5) may only be changed
+ * when VF Enable is Clear in all PFs of a Device.
+ */
+
+ /* Clear VF enable for all PFs */
+ device_foreach_child(pf_dev, dev->parent) {
+ dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ &tmp16);
+ tmp16 &= ~PCI_SRIOV_CTRL_VFE;
+ dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ tmp16);
+ }
+
+ /* Obtain a reference to PF0 device */
+ if (dm_pci_bus_find_bdf(PCI_BDF(PCI_BUS(entry->bdf),
+ PCI_DEV(entry->bdf), 0), &pf_dev)) {
+ printf("WARN: failed to get PF0\n");
+ }
+
+ if (entry->noari)
+ goto skip_ari;
+
+ /* Check that connected downstream port supports ARI Forwarding */
+ pos = dm_pci_find_capability(dev->parent, PCI_CAP_ID_EXP);
+ dm_pci_read_config32(dev->parent, pos + PCI_EXP_DEVCAP2, &tmp32);
+ if (!(tmp32 & PCI_EXP_DEVCAP2_ARI))
+ goto skip_ari;
+
+ /* Check that PF supports Alternate Routing ID */
+ if (!dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
+ goto skip_ari;
+
+ /* Set ARI Capable Hierarchy for PF0 */
+ dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, &tmp16);
+ tmp16 |= PCI_SRIOV_CTRL_ARI;
+ dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL, tmp16);
+ have_ari = true;
+
+skip_ari:
+ if (!have_ari) {
+ /*
+ * No ARI support or ARI disabled, so clear the ARI Capable
+ * Hierarchy bit for PF0
+ */
+ dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ &tmp16);
+ tmp16 &= ~PCI_SRIOV_CTRL_ARI;
+ dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ tmp16);
+ }
+
+ /* Set requested number of VFs */
+ dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF,
+ entry->num_vfs);
+
+ /* Read VF stride and offset with the configs just made */
+ dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_OFFSET, offset);
+ dm_pci_read_config16(dev, sriov_pos + PCI_SRIOV_VF_STRIDE, stride);
+
+ if (have_ari) {
+ /* Reset to default ARI Capable Hierarchy bit for PF0 */
+ dm_pci_read_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ &tmp16);
+ tmp16 &= ~PCI_SRIOV_CTRL_ARI;
+ dm_pci_write_config16(pf_dev, sriov_pos + PCI_SRIOV_CTRL,
+ tmp16);
+ }
+ /* Reset to default the number of VFs */
+ dm_pci_write_config16(dev, sriov_pos + PCI_SRIOV_NUM_VF, 0);
+}
+
+static int fdt_fixup_pci_vfs(void *blob, struct extra_iommu_entry *entry,
+ struct ls_pcie_rc *pcie_rc)
+{
+ struct udevice *dev, *bus;
+ u16 vf_offset, vf_stride;
+ int i, sriov_pos;
+ pci_dev_t bdf;
+
+ if (dm_pci_bus_find_bdf(entry->bdf, &dev)) {
+ printf("ERROR: BDF %d.%d.%d not found\n", PCI_BUS(entry->bdf),
+ PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf));
+ return 0;
+ }
+
+ sriov_pos = dm_pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!sriov_pos) {
+ printf("WARN: trying to set VFs on non-SRIOV dev\n");
+ return 0;
+ }
+
+ get_vf_offset_and_stride(dev, sriov_pos, entry, &vf_offset, &vf_stride);
+
+ for (bus = dev; device_is_on_pci_bus(bus);)
+ bus = bus->parent;
+
+ bdf = entry->bdf - PCI_BDF(bus->seq, 0, 0) + (vf_offset << 8);
+
+ for (i = 0; i < entry->num_vfs; i++) {
+ if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0)
+ return -1;
+ bdf += vf_stride << 8;
+ }
+
+ printf("Added %d iommu VF mappings for PF %d.%d.%d\n",
+ entry->num_vfs, PCI_BUS(entry->bdf),
+ PCI_DEV(entry->bdf), PCI_FUNC(entry->bdf));
+
+ return 0;
+}
+
 static void fdt_fixup_pcie_ls(void *blob)
 {
 struct udevice *dev, *bus;
 struct ls_pcie_rc *pcie_rc;
- int streamid;
- int index;
 pci_dev_t bdf;
+ struct extra_iommu_entry *entries;
+ int i, cnt, nodeoffset;
+
 /* Scan all known buses */
 for (pci_find_first_device(&dev);
@@ -196,33 +517,57 @@ static void fdt_fixup_pcie_ls(void *blob)
 pcie_rc = dev_get_priv(bus);
 
- streamid = pcie_next_streamid(pcie_rc->stream_id_cur,
- 
pcie_rc->pcie->idx); - if (streamid < 0) { - debug("ERROR: no stream ids free\n"); + /* the DT fixup must be relative to the hose first_busno */ + bdf = dm_pci_get_bdf(dev) - PCI_BDF(bus->seq, 0, 0); + + if (fdt_fixup_pcie_device_ls(blob, bdf, pcie_rc) < 0) + break; + } + + if (!IS_ENABLED(CONFIG_PCI_IOMMU_EXTRA_MAPPINGS)) + goto skip; + + list_for_each_entry(pcie_rc, &ls_pcie_list, list) { + nodeoffset = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (nodeoffset < 0) { + printf("ERROR: couldn't find pci node\n"); continue; - } else { - pcie_rc->stream_id_cur++; } - index = ls_pcie_next_lut_index(pcie_rc); - if (index < 0) { - debug("ERROR: no LUT indexes free\n"); + entries = get_extra_iommu_ents(blob, nodeoffset, + pcie_rc->dbi_res.start, &cnt); + if (!entries) continue; - } - /* the DT fixup must be relative to the hose first_busno */ - bdf = dm_pci_get_bdf(dev) - PCI_BDF(bus->seq, 0, 0); - /* map PCI b.d.f to streamID in LUT */ - ls_pcie_lut_set_mapping(pcie_rc, index, bdf >> 8, - streamid); - /* update msi-map in device tree */ - fdt_pcie_set_msi_map_entry_ls(blob, pcie_rc, bdf >> 8, - streamid); - /* update iommu-map in device tree */ - fdt_pcie_set_iommu_map_entry_ls(blob, pcie_rc, bdf >> 8, - streamid); + for (i = 0; i < cnt; i++) { + if (entries[i].action == EXTRA_IOMMU_ENTRY_HOTPLUG) { + bdf = entries[i].bdf; + printf("Added iommu map for hotplug %d.%d.%d\n", + PCI_BUS(bdf), PCI_DEV(bdf), + PCI_FUNC(bdf)); + if (fdt_fixup_pcie_device_ls(blob, bdf, + pcie_rc) < 0) { + free(entries); + return; + } + } else if (entries[i].action == EXTRA_IOMMU_ENTRY_VFS) { + if (fdt_fixup_pci_vfs(blob, &entries[i], + pcie_rc) < 0) { + free(entries); + return; + } + } else { + printf("Invalid action %d for BDF %d.%d.%d\n", + entries[i].action, + PCI_BUS(entries[i].bdf), + PCI_DEV(entries[i].bdf), + PCI_FUNC(entries[i].bdf)); + } + } + free(entries); } + +skip: pcie_board_fix_fdt(blob); } #endif @@ -230,28 +575,11 @@ static void fdt_fixup_pcie_ls(void *blob) static void ft_pcie_rc_fix(void *blob, struct ls_pcie_rc *pcie_rc) { int off; - uint svr; - char *compat = NULL; struct ls_pcie *pcie = pcie_rc->pcie; - off = fdt_node_offset_by_compat_reg(blob, "fsl,ls-pcie", - pcie_rc->dbi_res.start); - if (off < 0) { -#ifdef CONFIG_FSL_PCIE_COMPAT /* Compatible with older version of dts node */ - svr = (get_svr() >> SVR_VAR_PER_SHIFT) & 0xFFFFFE; - if (svr == SVR_LS2088A || svr == SVR_LS2084A || - svr == SVR_LS2048A || svr == SVR_LS2044A || - svr == SVR_LS2081A || svr == SVR_LS2041A) - compat = "fsl,ls2088a-pcie"; - else - compat = CONFIG_FSL_PCIE_COMPAT; - if (compat) - off = fdt_node_offset_by_compat_reg(blob, - compat, pcie_rc->dbi_res.start); -#endif - if (off < 0) - return; - } + off = fdt_pcie_get_nodeoffset(blob, pcie_rc); + if (off < 0) + return; if (pcie_rc->enabled && pcie->mode == PCI_HEADER_TYPE_BRIDGE) fdt_set_node_status(blob, off, FDT_STATUS_OKAY, 0); diff --git a/drivers/phy/marvell/comphy_cp110.c b/drivers/phy/marvell/comphy_cp110.c index 15e80049def..b0fcb13f1c9 100644 --- a/drivers/phy/marvell/comphy_cp110.c +++ b/drivers/phy/marvell/comphy_cp110.c @@ -7,6 +7,7 @@ #include <fdtdec.h> #include <log.h> #include <asm/io.h> +#include <asm/ptrace.h> #include <asm/arch/cpu.h> #include <asm/arch/soc.h> #include <linux/delay.h> @@ -22,6 +23,40 @@ DECLARE_GLOBAL_DATA_PTR; #define HPIPE_ADDR(base, lane) (SD_ADDR(base, lane) + 0x800) #define COMPHY_ADDR(base, lane) (base + 0x28 * lane) +/* Firmware related definitions used for SMC calls */ +#define MV_SIP_COMPHY_POWER_ON 0x82000001 +#define 
MV_SIP_COMPHY_POWER_OFF 0x82000002 +#define MV_SIP_COMPHY_PLL_LOCK 0x82000003 + +/* Used to distinguish between different possible callers (U-boot/Linux) */ +#define COMPHY_CALLER_UBOOT (0x1 << 21) + +#define COMPHY_FW_MODE_FORMAT(mode) ((mode) << 12) +#define COMPHY_FW_FORMAT(mode, idx, speeds) \ + (((mode) << 12) | ((idx) << 8) | ((speeds) << 2)) + +#define COMPHY_FW_PCIE_FORMAT(pcie_width, clk_src, mode, speeds) \ + (COMPHY_CALLER_UBOOT | ((pcie_width) << 18) | \ + ((clk_src) << 17) | COMPHY_FW_FORMAT(mode, 0, speeds)) + +#define COMPHY_SATA_MODE 0x1 +#define COMPHY_SGMII_MODE 0x2 /* SGMII 1G */ +#define COMPHY_HS_SGMII_MODE 0x3 /* SGMII 2.5G */ +#define COMPHY_USB3H_MODE 0x4 +#define COMPHY_USB3D_MODE 0x5 +#define COMPHY_PCIE_MODE 0x6 +#define COMPHY_RXAUI_MODE 0x7 +#define COMPHY_XFI_MODE 0x8 +#define COMPHY_SFI_MODE 0x9 +#define COMPHY_USB3_MODE 0xa +#define COMPHY_AP_MODE 0xb + +/* Comphy unit index macro */ +#define COMPHY_UNIT_ID0 0 +#define COMPHY_UNIT_ID1 1 +#define COMPHY_UNIT_ID2 2 +#define COMPHY_UNIT_ID3 3 + struct utmi_phy_data { void __iomem *utmi_base_addr; void __iomem *usb_cfg_addr; @@ -85,441 +120,6 @@ static u32 polling_with_timeout(void __iomem *addr, u32 val, return 0; } -static int comphy_pcie_power_up(u32 lane, u32 pcie_width, bool clk_src, - bool is_end_point, void __iomem *hpipe_base, - void __iomem *comphy_base) -{ - u32 mask, data, ret = 1; - void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane); - void __iomem *comphy_addr = COMPHY_ADDR(comphy_base, lane); - void __iomem *addr; - u32 pcie_clk = 0; /* set input by default */ - - debug_enter(); - - /* - * ToDo: - * Add SAR (Sample-At-Reset) configuration for the PCIe clock - * direction. SAR code is currently not ported from Marvell - * U-Boot to mainline version. - * - * SerDes Lane 4/5 got the PCIe ref-clock #1, - * and SerDes Lane 0 got PCIe ref-clock #0 - */ - debug("PCIe clock = %x\n", pcie_clk); - debug("PCIe RC = %d\n", !is_end_point); - debug("PCIe width = %d\n", pcie_width); - - /* enable PCIe by4 and by2 */ - if (lane == 0) { - if (pcie_width == 4) { - reg_set(comphy_base + COMMON_PHY_SD_CTRL1, - 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X4_EN_OFFSET, - COMMON_PHY_SD_CTRL1_PCIE_X4_EN_MASK); - } else if (pcie_width == 2) { - reg_set(comphy_base + COMMON_PHY_SD_CTRL1, - 0x1 << COMMON_PHY_SD_CTRL1_PCIE_X2_EN_OFFSET, - COMMON_PHY_SD_CTRL1_PCIE_X2_EN_MASK); - } - } - - /* - * If PCIe clock is output and clock source from SerDes lane 5, - * we need to configure the clock-source MUX. 
- * By default, the clock source is from lane 4 - */ - if (pcie_clk && clk_src && (lane == 5)) { - reg_set((void __iomem *)DFX_DEV_GEN_CTRL12, - 0x3 << DFX_DEV_GEN_PCIE_CLK_SRC_OFFSET, - DFX_DEV_GEN_PCIE_CLK_SRC_MASK); - } - - debug("stage: RFU configurations - hard reset comphy\n"); - /* RFU configurations - hard reset comphy */ - mask = COMMON_PHY_CFG1_PWR_UP_MASK; - data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; - mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; - data |= 0x1 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; - mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK; - data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; - mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; - data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; - mask |= COMMON_PHY_PHY_MODE_MASK; - data |= 0x0 << COMMON_PHY_PHY_MODE_OFFSET; - reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); - - /* release from hard reset */ - mask = COMMON_PHY_CFG1_PWR_ON_RESET_MASK; - data = 0x1 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET; - mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK; - data |= 0x1 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET; - reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); - - /* Wait 1ms - until band gap and ref clock ready */ - mdelay(1); - /* Start comphy Configuration */ - debug("stage: Comphy configuration\n"); - /* Set PIPE soft reset */ - mask = HPIPE_RST_CLK_CTRL_PIPE_RST_MASK; - data = 0x1 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET; - /* Set PHY datapath width mode for V0 */ - mask |= HPIPE_RST_CLK_CTRL_FIXED_PCLK_MASK; - data |= 0x1 << HPIPE_RST_CLK_CTRL_FIXED_PCLK_OFFSET; - /* Set Data bus width USB mode for V0 */ - mask |= HPIPE_RST_CLK_CTRL_PIPE_WIDTH_MASK; - data |= 0x0 << HPIPE_RST_CLK_CTRL_PIPE_WIDTH_OFFSET; - /* Set CORE_CLK output frequency for 250Mhz */ - mask |= HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_MASK; - data |= 0x0 << HPIPE_RST_CLK_CTRL_CORE_FREQ_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, data, mask); - /* Set PLL ready delay for 0x2 */ - data = 0x2 << HPIPE_CLK_SRC_LO_PLL_RDY_DL_OFFSET; - mask = HPIPE_CLK_SRC_LO_PLL_RDY_DL_MASK; - if (pcie_width != 1) { - data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_OFFSET; - mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SEL_MASK; - data |= 0x1 << HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_OFFSET; - mask |= HPIPE_CLK_SRC_LO_BUNDLE_PERIOD_SCALE_MASK; - } - reg_set(hpipe_addr + HPIPE_CLK_SRC_LO_REG, data, mask); - - /* Set PIPE mode interface to PCIe3 - 0x1 & set lane order */ - data = 0x1 << HPIPE_CLK_SRC_HI_MODE_PIPE_OFFSET; - mask = HPIPE_CLK_SRC_HI_MODE_PIPE_MASK; - if (pcie_width != 1) { - mask |= HPIPE_CLK_SRC_HI_LANE_STRT_MASK; - mask |= HPIPE_CLK_SRC_HI_LANE_MASTER_MASK; - mask |= HPIPE_CLK_SRC_HI_LANE_BREAK_MASK; - if (lane == 0) { - data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_STRT_OFFSET; - data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_MASTER_OFFSET; - } else if (lane == (pcie_width - 1)) { - data |= 0x1 << HPIPE_CLK_SRC_HI_LANE_BREAK_OFFSET; - } - } - reg_set(hpipe_addr + HPIPE_CLK_SRC_HI_REG, data, mask); - /* Config update polarity equalization */ - reg_set(hpipe_addr + HPIPE_LANE_EQ_CFG1_REG, - 0x1 << HPIPE_CFG_UPDATE_POLARITY_OFFSET, - HPIPE_CFG_UPDATE_POLARITY_MASK); - /* Set PIPE version 4 to mode enable */ - reg_set(hpipe_addr + HPIPE_DFE_CTRL_28_REG, - 0x1 << HPIPE_DFE_CTRL_28_PIPE4_OFFSET, - HPIPE_DFE_CTRL_28_PIPE4_MASK); - /* TODO: check if pcie clock is output/input - for bringup use input*/ - /* Enable PIN clock 100M_125M */ - mask = 0; - data = 0; - /* Only if clock is output, configure the clock-source mux */ - if (pcie_clk) { - mask |= HPIPE_MISC_CLK100M_125M_MASK; - data |= 0x1 << 
HPIPE_MISC_CLK100M_125M_OFFSET; - } - /* - * Set PIN_TXDCLK_2X Clock Frequency Selection for outputs 500MHz - * clock - */ - mask |= HPIPE_MISC_TXDCLK_2X_MASK; - data |= 0x0 << HPIPE_MISC_TXDCLK_2X_OFFSET; - /* Enable 500MHz Clock */ - mask |= HPIPE_MISC_CLK500_EN_MASK; - data |= 0x1 << HPIPE_MISC_CLK500_EN_OFFSET; - if (pcie_clk) { /* output */ - /* Set reference clock comes from group 1 */ - mask |= HPIPE_MISC_REFCLK_SEL_MASK; - data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; - } else { - /* Set reference clock comes from group 2 */ - mask |= HPIPE_MISC_REFCLK_SEL_MASK; - data |= 0x1 << HPIPE_MISC_REFCLK_SEL_OFFSET; - } - mask |= HPIPE_MISC_ICP_FORCE_MASK; - data |= 0x1 << HPIPE_MISC_ICP_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); - if (pcie_clk) { /* output */ - /* Set reference frequcency select - 0x2 for 25MHz*/ - mask = HPIPE_PWR_PLL_REF_FREQ_MASK; - data = 0x2 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; - } else { - /* Set reference frequcency select - 0x0 for 100MHz*/ - mask = HPIPE_PWR_PLL_REF_FREQ_MASK; - data = 0x0 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; - } - /* Set PHY mode to PCIe */ - mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; - data |= 0x3 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); - - /* ref clock alignment */ - if (pcie_width != 1) { - mask = HPIPE_LANE_ALIGN_OFF_MASK; - data = 0x0 << HPIPE_LANE_ALIGN_OFF_OFFSET; - reg_set(hpipe_addr + HPIPE_LANE_ALIGN_REG, data, mask); - } - - /* - * Set the amount of time spent in the LoZ state - set for 0x7 only if - * the PCIe clock is output - */ - if (pcie_clk) { - reg_set(hpipe_addr + HPIPE_GLOBAL_PM_CTRL, - 0x7 << HPIPE_GLOBAL_PM_RXDLOZ_WAIT_OFFSET, - HPIPE_GLOBAL_PM_RXDLOZ_WAIT_MASK); - } - - /* Set Maximal PHY Generation Setting(8Gbps) */ - mask = HPIPE_INTERFACE_GEN_MAX_MASK; - data = 0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET; - /* Bypass frame detection and sync detection for RX DATA */ - mask = HPIPE_INTERFACE_DET_BYPASS_MASK; - data = 0x1 << HPIPE_INTERFACE_DET_BYPASS_OFFSET; - /* Set Link Train Mode (Tx training control pins are used) */ - mask |= HPIPE_INTERFACE_LINK_TRAIN_MASK; - data |= 0x1 << HPIPE_INTERFACE_LINK_TRAIN_OFFSET; - reg_set(hpipe_addr + HPIPE_INTERFACE_REG, data, mask); - - /* Set Idle_sync enable */ - mask = HPIPE_PCIE_IDLE_SYNC_MASK; - data = 0x1 << HPIPE_PCIE_IDLE_SYNC_OFFSET; - /* Select bits for PCIE Gen3(32bit) */ - mask |= HPIPE_PCIE_SEL_BITS_MASK; - data |= 0x2 << HPIPE_PCIE_SEL_BITS_OFFSET; - reg_set(hpipe_addr + HPIPE_PCIE_REG0, data, mask); - - /* Enable Tx_adapt_g1 */ - mask = HPIPE_TX_TRAIN_CTRL_G1_MASK; - data = 0x1 << HPIPE_TX_TRAIN_CTRL_G1_OFFSET; - /* Enable Tx_adapt_gn1 */ - mask |= HPIPE_TX_TRAIN_CTRL_GN1_MASK; - data |= 0x1 << HPIPE_TX_TRAIN_CTRL_GN1_OFFSET; - /* Disable Tx_adapt_g0 */ - mask |= HPIPE_TX_TRAIN_CTRL_G0_MASK; - data |= 0x0 << HPIPE_TX_TRAIN_CTRL_G0_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask); - - /* Set reg_tx_train_chk_init */ - mask = HPIPE_TX_TRAIN_CHK_INIT_MASK; - data = 0x0 << HPIPE_TX_TRAIN_CHK_INIT_OFFSET; - /* Enable TX_COE_FM_PIN_PCIE3_EN */ - mask |= HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_MASK; - data |= 0x1 << HPIPE_TX_TRAIN_COE_FM_PIN_PCIE3_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask); - - debug("stage: TRx training parameters\n"); - /* Set Preset sweep configurations */ - mask = HPIPE_TX_TX_STATUS_CHECK_MODE_MASK; - data = 0x1 << HPIPE_TX_STATUS_CHECK_MODE_OFFSET; - - mask |= HPIPE_TX_NUM_OF_PRESET_MASK; - data |= 0x7 << HPIPE_TX_NUM_OF_PRESET_OFFSET; - - mask |= 
HPIPE_TX_SWEEP_PRESET_EN_MASK; - data |= 0x1 << HPIPE_TX_SWEEP_PRESET_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_11_REG, data, mask); - - /* Tx train start configuration */ - mask = HPIPE_TX_TRAIN_START_SQ_EN_MASK; - data = 0x1 << HPIPE_TX_TRAIN_START_SQ_EN_OFFSET; - - mask |= HPIPE_TX_TRAIN_START_FRM_DET_EN_MASK; - data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_DET_EN_OFFSET; - - mask |= HPIPE_TX_TRAIN_START_FRM_LOCK_EN_MASK; - data |= 0x0 << HPIPE_TX_TRAIN_START_FRM_LOCK_EN_OFFSET; - - mask |= HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK; - data |= 0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask); - - /* Enable Tx train P2P */ - mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK; - data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask); - - /* Configure Tx train timeout */ - mask = HPIPE_TRX_TRAIN_TIMER_MASK; - data = 0x17 << HPIPE_TRX_TRAIN_TIMER_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_4_REG, data, mask); - - /* Disable G0/G1/GN1 adaptation */ - mask = HPIPE_TX_TRAIN_CTRL_G1_MASK | HPIPE_TX_TRAIN_CTRL_GN1_MASK - | HPIPE_TX_TRAIN_CTRL_G0_OFFSET; - data = 0; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_REG, data, mask); - - /* Disable DTL frequency loop */ - mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; - data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); - - /* Configure G3 DFE */ - mask = HPIPE_G3_DFE_RES_MASK; - data = 0x3 << HPIPE_G3_DFE_RES_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask); - - /* Use TX/RX training result for DFE */ - mask = HPIPE_DFE_RES_FORCE_MASK; - data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); - - /* Configure initial and final coefficient value for receiver */ - mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK; - data = 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET; - - mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK; - data |= 0x1 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET; - - mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK; - data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SET_1_REG, data, mask); - - /* Trigger sampler enable pulse */ - mask = HPIPE_SMAPLER_MASK; - data = 0x1 << HPIPE_SMAPLER_OFFSET; - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); - udelay(5); - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, 0, mask); - - /* FFE resistor tuning for different bandwidth */ - mask = HPIPE_G3_FFE_DEG_RES_LEVEL_MASK; - data = 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET; - - mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK; - data |= 0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask); - - /* Pattern lock lost timeout disable */ - mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK; - data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask); - - /* Configure DFE adaptations */ - mask = HPIPE_CDR_MAX_DFE_ADAPT_1_MASK; - data = 0x1 << HPIPE_CDR_MAX_DFE_ADAPT_1_OFFSET; - mask |= HPIPE_CDR_MAX_DFE_ADAPT_0_MASK; - data |= 0x0 << HPIPE_CDR_MAX_DFE_ADAPT_0_OFFSET; - mask |= HPIPE_CDR_RX_MAX_DFE_ADAPT_1_MASK; - data |= 0x0 << HPIPE_CDR_RX_MAX_DFE_ADAPT_1_OFFSET; - reg_set(hpipe_addr + HPIPE_CDR_CONTROL_REG, data, mask); - mask = HPIPE_DFE_TX_MAX_DFE_ADAPT_MASK; - data = 0x0 << HPIPE_DFE_TX_MAX_DFE_ADAPT_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_CONTROL_REG, data, mask); - - /* Genration 2 setting 1*/ - 
mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK; - data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK; - data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK; - data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET; - reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask); - - /* DFE enable */ - mask = HPIPE_G2_DFE_RES_MASK; - data = 0x3 << HPIPE_G2_DFE_RES_OFFSET; - reg_set(hpipe_addr + HPIPE_G2_SETTINGS_4_REG, data, mask); - - /* Configure DFE Resolution */ - mask = HPIPE_LANE_CFG4_DFE_EN_SEL_MASK; - data = 0x1 << HPIPE_LANE_CFG4_DFE_EN_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_LANE_CFG4_REG, data, mask); - - /* VDD calibration control */ - mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; - data = 0x16 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET; - reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); - - /* Set PLL Charge-pump Current Control */ - mask = HPIPE_G3_SETTING_5_G3_ICP_MASK; - data = 0x4 << HPIPE_G3_SETTING_5_G3_ICP_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SETTING_5_REG, data, mask); - - /* Set lane rqualization remote setting */ - mask = HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_MASK; - data = 0x1 << HPIPE_LANE_CFG_FOM_DIRN_OVERRIDE_OFFSET; - mask |= HPIPE_LANE_CFG_FOM_ONLY_MODE_MASK; - data |= 0x1 << HPIPE_LANE_CFG_FOM_ONLY_MODE_OFFFSET; - mask |= HPIPE_LANE_CFG_FOM_PRESET_VECTOR_MASK; - data |= 0x2 << HPIPE_LANE_CFG_FOM_PRESET_VECTOR_OFFSET; - reg_set(hpipe_addr + HPIPE_LANE_EQ_REMOTE_SETTING_REG, data, mask); - - if (!is_end_point) { - /* Set phy in root complex mode */ - mask = HPIPE_CFG_PHY_RC_EP_MASK; - data = 0x1 << HPIPE_CFG_PHY_RC_EP_OFFSET; - reg_set(hpipe_addr + HPIPE_LANE_EQU_CONFIG_0_REG, data, mask); - } - - debug("stage: Comphy power up\n"); - - /* - * For PCIe by4 or by2 - release from reset only after finish to - * configure all lanes - */ - if ((pcie_width == 1) || (lane == (pcie_width - 1))) { - u32 i, start_lane, end_lane; - - if (pcie_width != 1) { - /* allows writing to all lanes in one write */ - reg_set(comphy_base + COMMON_PHY_SD_CTRL1, - 0x0 << - COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_OFFSET, - COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_MASK); - start_lane = 0; - end_lane = pcie_width; - - /* - * Release from PIPE soft reset - * for PCIe by4 or by2 - release from soft reset - * all lanes - can't use read modify write - */ - reg_set(HPIPE_ADDR(hpipe_base, 0) + - HPIPE_RST_CLK_CTRL_REG, 0x24, 0xffffffff); - } else { - start_lane = lane; - end_lane = lane + 1; - - /* - * Release from PIPE soft reset - * for PCIe by4 or by2 - release from soft reset - * all lanes - */ - reg_set(hpipe_addr + HPIPE_RST_CLK_CTRL_REG, - 0x0 << HPIPE_RST_CLK_CTRL_PIPE_RST_OFFSET, - HPIPE_RST_CLK_CTRL_PIPE_RST_MASK); - } - - - if (pcie_width != 1) { - /* disable writing to all lanes with one write */ - reg_set(comphy_base + COMMON_PHY_SD_CTRL1, - 0x3210 << - COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_OFFSET, - COMMON_PHY_SD_CTRL1_COMPHY_0_4_PORT_MASK); - } - - debug("stage: Check PLL\n"); - /* Read lane status */ - for (i = start_lane; i < end_lane; i++) { - addr = HPIPE_ADDR(hpipe_base, i) + - HPIPE_LANE_STATUS1_REG; - data = HPIPE_LANE_STATUS1_PCLK_EN_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 15000); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", - hpipe_addr + HPIPE_LANE_STATUS1_REG, - data); - pr_err("HPIPE_LANE_STATUS1_PCLK_EN_MASK is 0\n"); - ret = 0; - } - } - } - - debug_exit(); - return ret; -} - static int comphy_usb3_power_up(u32 lane, void __iomem *hpipe_base, void __iomem *comphy_base) { 
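The hand-rolled register sequences removed above are replaced by SiP service calls into the secure firmware. As a minimal sketch of the new flow — using only the MV_SIP_* and COMPHY_* definitions added earlier in this patch, with lane, ret and comphy_base_addr standing in for the caller's locals:

	/* Compose the firmware mode word; COMPHY_FW_MODE_FORMAT() places
	 * the mode in bits [15:12], so COMPHY_SATA_MODE (0x1) -> 0x1000. */
	u32 mode = COMPHY_FW_MODE_FORMAT(COMPHY_SATA_MODE);

	/* comphy_smc(), added in the hunk below, marshals function id,
	 * comphy base, lane and mode into pt_regs and issues smc_call(). */
	ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, comphy_base_addr, lane, mode);
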
@@ -642,15 +242,31 @@ static int comphy_usb3_power_up(u32 lane, void __iomem *hpipe_base,
 return ret;
 }
 
+static int comphy_smc(u32 function_id, void __iomem *comphy_base_addr,
+ u32 lane, u32 mode)
+{
+ struct pt_regs pregs = {0};
+
+ pregs.regs[0] = function_id;
+ pregs.regs[1] = (unsigned long)comphy_base_addr;
+ pregs.regs[2] = lane;
+ pregs.regs[3] = mode;
+
+ smc_call(&pregs);
+
+ /*
+ * TODO: The firmware returns 0 on success; temporarily map it to the
+ * U-Boot convention. Once comphy is fully reworked, the U-Boot
+ * convention should be changed and this conversion removed.
+ */
+ return pregs.regs[0] ? 0 : 1;
+}
+
 static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
- void __iomem *comphy_base, int cp_index,
- u32 invert)
+ void __iomem *comphy_base_addr, int cp_index,
+ u32 type)
 {
 u32 mask, data, i, ret = 1;
- void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane);
- void __iomem *sd_ip_addr = SD_ADDR(hpipe_base, lane);
- void __iomem *comphy_addr = COMPHY_ADDR(comphy_base, lane);
- void __iomem *addr;
 void __iomem *sata_base = NULL;
 int sata_node = -1; /* Set to -1 in order to read the first sata node */
@@ -703,255 +319,8 @@ static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
 data |= 0x0 << SATA3_CTRL_SATA_SSU_OFFSET;
 reg_set(sata_base + SATA3_VENDOR_DATA, data, mask);
 
- debug("stage: RFU configurations - hard reset comphy\n");
- /* RFU configurations - hard reset comphy */
- mask = COMMON_PHY_CFG1_PWR_UP_MASK;
- data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET;
- mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK;
- data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET;
- mask |= COMMON_PHY_CFG1_PWR_ON_RESET_MASK;
- data |= 0x0 << COMMON_PHY_CFG1_PWR_ON_RESET_OFFSET;
- mask |= COMMON_PHY_CFG1_CORE_RSTN_MASK;
- data |= 0x0 << COMMON_PHY_CFG1_CORE_RSTN_OFFSET;
- reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask);
-
- /* Set select data width 40Bit - SATA mode only */
- reg_set(comphy_addr + COMMON_PHY_CFG6_REG,
- 0x1 << COMMON_PHY_CFG6_IF_40_SEL_OFFSET,
- COMMON_PHY_CFG6_IF_40_SEL_MASK);
-
- /* release from hard reset in SD external */
- mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
- data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
- mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
- data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
- reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
-
- /* Wait 1ms - until band gap and ref clock ready */
- mdelay(1);
+ ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, comphy_base_addr, lane, type);
 
- debug("stage: Comphy configuration\n");
- /* Start comphy Configuration */
- /* Set reference clock to comes from group 1 - choose 25Mhz */
- reg_set(hpipe_addr + HPIPE_MISC_REG,
- 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET,
- HPIPE_MISC_REFCLK_SEL_MASK);
- /* Reference frequency select set 1 (for SATA = 25Mhz) */
- mask = HPIPE_PWR_PLL_REF_FREQ_MASK;
- data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET;
- /* PHY mode select (set SATA = 0x0 */
- mask |= HPIPE_PWR_PLL_PHY_MODE_MASK;
- data |= 0x0 << HPIPE_PWR_PLL_PHY_MODE_OFFSET;
- reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask);
- /* Set max PHY generation setting - 6Gbps */
- reg_set(hpipe_addr + HPIPE_INTERFACE_REG,
- 0x2 << HPIPE_INTERFACE_GEN_MAX_OFFSET,
- HPIPE_INTERFACE_GEN_MAX_MASK);
- /* Set select data width 40Bit (SEL_BITS[2:0]) */
- reg_set(hpipe_addr + HPIPE_LOOPBACK_REG,
- 0x2 << HPIPE_LOOPBACK_SEL_OFFSET, HPIPE_LOOPBACK_SEL_MASK);
-
- debug("stage: Analog paramters from ETP(HW)\n");
- /* Set analog parameters from ETP(HW) */
- /* G1 settings */
- mask = 
HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; - data = 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK; - data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK; - data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK; - data |= 0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK; - data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask); - - mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; - data = 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; - data |= 0x2 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; - data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_MASK; - data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_DEG_RES_LEVEL_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_MASK; - data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_LOAD_RES_LEVEL_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); - - /* G2 settings */ - mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK; - data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_SELMUPP_MASK; - data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPP_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK; - data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK; - data |= 0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET; - mask |= HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK; - data |= 0x1 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET; - reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask); - - /* G3 settings */ - mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK; - data = 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET; - mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK; - data |= 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET; - mask |= HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK; - data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET; - mask |= HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK; - data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET; - mask |= HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK; - data |= 0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET; - mask |= HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK; - data |= 0x2 << HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_OFFSET; - mask |= HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_MASK; - data |= 0x0 << HPIPE_G3_SET_1_G3_SAMPLER_INPAIRX2_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SET_1_REG, data, mask); - - /* DTL Control */ - mask = HPIPE_PWR_CTR_DTL_SQ_DET_EN_MASK; - data = 0x1 << HPIPE_PWR_CTR_DTL_SQ_DET_EN_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_SQ_PLOOP_EN_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_CLAMPING_SEL_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_CLAMPING_SEL_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_INTPCLK_DIV_FORCE_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_OFFSET; - mask |= HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_MASK; - data |= 0x1 << HPIPE_PWR_CTR_DTL_CLK_MODE_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); - - /* Trigger sampler enable pulse (by toggleing the bit) */ - mask = HPIPE_SMAPLER_MASK; - data = 0x1 << HPIPE_SMAPLER_OFFSET; - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); 
- mask = HPIPE_SMAPLER_MASK; - data = 0x0 << HPIPE_SMAPLER_OFFSET; - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); - - /* VDD Calibration Control 3 */ - mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; - data = 0x10 << HPIPE_EXT_SELLV_RXSAMPL_OFFSET; - reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); - - /* DFE Resolution Control */ - mask = HPIPE_DFE_RES_FORCE_MASK; - data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); - - /* DFE F3-F5 Coefficient Control */ - mask = HPIPE_DFE_F3_F5_DFE_EN_MASK; - data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET; - mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK; - data = 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask); - - /* G3 Setting 3 */ - mask = HPIPE_G3_FFE_CAP_SEL_MASK; - data = 0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET; - mask |= HPIPE_G3_FFE_RES_SEL_MASK; - data |= 0x4 << HPIPE_G3_FFE_RES_SEL_OFFSET; - mask |= HPIPE_G3_FFE_SETTING_FORCE_MASK; - data |= 0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET; - mask |= HPIPE_G3_FFE_DEG_RES_LEVEL_MASK; - data |= 0x1 << HPIPE_G3_FFE_DEG_RES_LEVEL_OFFSET; - mask |= HPIPE_G3_FFE_LOAD_RES_LEVEL_MASK; - data |= 0x3 << HPIPE_G3_FFE_LOAD_RES_LEVEL_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SETTING_3_REG, data, mask); - - /* G3 Setting 4 */ - mask = HPIPE_G3_DFE_RES_MASK; - data = 0x2 << HPIPE_G3_DFE_RES_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask); - - /* Offset Phase Control */ - mask = HPIPE_OS_PH_OFFSET_MASK; - data = 0x5c << HPIPE_OS_PH_OFFSET_OFFSET; - mask |= HPIPE_OS_PH_OFFSET_FORCE_MASK; - data |= 0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); - mask = HPIPE_OS_PH_VALID_MASK; - data = 0x1 << HPIPE_OS_PH_VALID_OFFSET; - reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); - mask = HPIPE_OS_PH_VALID_MASK; - data = 0x0 << HPIPE_OS_PH_VALID_OFFSET; - reg_set(hpipe_addr + HPIPE_PHASE_CONTROL_REG, data, mask); - - /* Set G1 TX amplitude and TX post emphasis value */ - mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK; - data = 0x8 << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET; - mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK; - data |= 0x1 << HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET; - mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; - data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; - mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK; - data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask); - - /* Set G2 TX amplitude and TX post emphasis value */ - mask = HPIPE_G2_SET_0_G2_TX_AMP_MASK; - data = 0xa << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET; - mask |= HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK; - data |= 0x1 << HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET; - mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_MASK; - data |= 0x2 << HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET; - mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK; - data |= 0x1 << HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G2_SET_0_REG, data, mask); - - /* Set G3 TX amplitude and TX post emphasis value */ - mask = HPIPE_G3_SET_0_G3_TX_AMP_MASK; - data = 0xe << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET; - mask |= HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK; - data |= 0x1 << HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET; - mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_MASK; - data |= 0x6 << HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET; - mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK; - data |= 0x1 << HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET; - mask |= HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK; - data |= 0x4 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET; - mask |= 
HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK; - data |= 0x0 << HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G3_SET_0_REG, data, mask); - - /* SERDES External Configuration 2 register */ - mask = SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask); - - /* DFE reset sequence */ - reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, - 0x1 << HPIPE_PWR_CTR_RST_DFE_OFFSET, - HPIPE_PWR_CTR_RST_DFE_MASK); - reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, - 0x0 << HPIPE_PWR_CTR_RST_DFE_OFFSET, - HPIPE_PWR_CTR_RST_DFE_MASK); - - /* Set RX / TX swaps */ - data = mask = 0; - if (invert & PHY_POLARITY_TXD_INVERT) { - data |= (1 << HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET); - mask |= HPIPE_SYNC_PATTERN_TXD_SWAP_MASK; - } - if (invert & PHY_POLARITY_RXD_INVERT) { - data |= (1 << HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET); - mask |= HPIPE_SYNC_PATTERN_RXD_SWAP_MASK; - } - reg_set(hpipe_addr + HPIPE_SYNC_PATTERN_REG, data, mask); - - /* SW reset for interupt logic */ - reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, - 0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET, - HPIPE_PWR_CTR_SFT_RST_MASK); - reg_set(hpipe_addr + HPIPE_PWR_CTR_REG, - 0x0 << HPIPE_PWR_CTR_SFT_RST_OFFSET, - HPIPE_PWR_CTR_SFT_RST_MASK); - - debug("stage: Comphy power up\n"); /* * MAC configuration power up comphy - power up PLL/TX/RX * use indirect address for vendor spesific SATA control register @@ -981,469 +350,7 @@ static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base, reg_set(sata_base + SATA3_VENDOR_DATA, 0x1 << SATA_MBUS_REGRET_EN_OFFSET, SATA_MBUS_REGRET_EN_MASK); - debug("stage: Check PLL\n"); - - addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; - data = SD_EXTERNAL_STATUS0_PLL_TX_MASK & - SD_EXTERNAL_STATUS0_PLL_RX_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 15000); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", - hpipe_addr + HPIPE_LANE_STATUS1_REG, data); - pr_err("SD_EXTERNAL_STATUS0_PLL_TX is %d, SD_EXTERNAL_STATUS0_PLL_RX is %d\n", - (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK), - (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK)); - ret = 0; - } - - debug_exit(); - return ret; -} - -static int comphy_sgmii_power_up(u32 lane, u32 sgmii_speed, - void __iomem *hpipe_base, - void __iomem *comphy_base) -{ - u32 mask, data, ret = 1; - void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane); - void __iomem *sd_ip_addr = SD_ADDR(hpipe_base, lane); - void __iomem *comphy_addr = COMPHY_ADDR(comphy_base, lane); - void __iomem *addr; - - debug_enter(); - debug("stage: RFU configurations - hard reset comphy\n"); - /* RFU configurations - hard reset comphy */ - mask = COMMON_PHY_CFG1_PWR_UP_MASK; - data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; - mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; - data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; - reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); - - /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */ - mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK; - mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK; - if (sgmii_speed == PHY_SPEED_1_25G) { - data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; - data |= 0x6 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; - } else { - /* 3.125G */ - data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; - data |= 0x8 << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; - } - mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; - data |= 0 << 
SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; - data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK; - data |= 1 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); - - /* release from hard reset */ - mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; - data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; - data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - /* release from hard reset */ - mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - - /* Wait 1ms - until band gap and ref clock ready */ - mdelay(1); - - /* Start comphy Configuration */ - debug("stage: Comphy configuration\n"); - /* set reference clock */ - mask = HPIPE_MISC_REFCLK_SEL_MASK; - data = 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); - /* Power and PLL Control */ - mask = HPIPE_PWR_PLL_REF_FREQ_MASK; - data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; - mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; - data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); - /* Loopback register */ - mask = HPIPE_LOOPBACK_SEL_MASK; - data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask); - /* rx control 1 */ - mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK; - data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET; - mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK; - data |= 0x0 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask); - /* DTL Control */ - mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; - data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); - - /* Set analog paramters from ETP(HW) - for now use the default datas */ - debug("stage: Analog paramters from ETP(HW)\n"); - - reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, - 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET, - HPIPE_G1_SET_0_G1_TX_EMPH1_MASK); - - debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n"); - /* SERDES External Configuration */ - mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); - - /* check PLL rx & tx ready */ - addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; - data = SD_EXTERNAL_STATUS0_PLL_RX_MASK | - SD_EXTERNAL_STATUS0_PLL_TX_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 15000); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", - sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); - pr_err("SD_EXTERNAL_STATUS0_PLL_RX is %d, SD_EXTERNAL_STATUS0_PLL_TX is %d\n", - (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK), - (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK)); - ret = 0; - } - - /* RX init */ - mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; - reg_set(sd_ip_addr + 
SD_EXTERNAL_CONFIG1_REG, data, mask); - - /* check that RX init done */ - addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; - data = SD_EXTERNAL_STATUS0_RX_INIT_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 100); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); - pr_err("SD_EXTERNAL_STATUS0_RX_INIT is 0\n"); - ret = 0; - } - - debug("stage: RF Reset\n"); - /* RF Reset */ - mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - debug_exit(); - return ret; -} - -static int comphy_sfi_power_up(u32 lane, void __iomem *hpipe_base, - void __iomem *comphy_base, u32 speed) -{ - u32 mask, data, ret = 1; - void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane); - void __iomem *sd_ip_addr = SD_ADDR(hpipe_base, lane); - void __iomem *comphy_addr = COMPHY_ADDR(comphy_base, lane); - void __iomem *addr; - - debug_enter(); - debug("stage: RFU configurations - hard reset comphy\n"); - /* RFU configurations - hard reset comphy */ - mask = COMMON_PHY_CFG1_PWR_UP_MASK; - data = 0x1 << COMMON_PHY_CFG1_PWR_UP_OFFSET; - mask |= COMMON_PHY_CFG1_PIPE_SELECT_MASK; - data |= 0x0 << COMMON_PHY_CFG1_PIPE_SELECT_OFFSET; - reg_set(comphy_addr + COMMON_PHY_CFG1_REG, data, mask); - - /* Select Baud Rate of Comphy And PD_PLL/Tx/Rx */ - mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_MASK; - data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_RX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_MASK; - data |= 0xE << SD_EXTERNAL_CONFIG0_SD_PHY_GEN_TX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; - data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; - data |= 0 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_MASK; - data |= 0 << SD_EXTERNAL_CONFIG0_HALF_BUS_MODE_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); - - /* release from hard reset */ - mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; - data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; - data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - - /* Wait 1ms - until band gap and ref clock ready */ - mdelay(1); - - /* Start comphy Configuration */ - debug("stage: Comphy configuration\n"); - /* set reference clock */ - mask = HPIPE_MISC_ICP_FORCE_MASK; - data = (speed == PHY_SPEED_5_15625G) ? 
- (0x0 << HPIPE_MISC_ICP_FORCE_OFFSET) : - (0x1 << HPIPE_MISC_ICP_FORCE_OFFSET); - mask |= HPIPE_MISC_REFCLK_SEL_MASK; - data |= 0x0 << HPIPE_MISC_REFCLK_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_MISC_REG, data, mask); - /* Power and PLL Control */ - mask = HPIPE_PWR_PLL_REF_FREQ_MASK; - data = 0x1 << HPIPE_PWR_PLL_REF_FREQ_OFFSET; - mask |= HPIPE_PWR_PLL_PHY_MODE_MASK; - data |= 0x4 << HPIPE_PWR_PLL_PHY_MODE_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_PLL_REG, data, mask); - /* Loopback register */ - mask = HPIPE_LOOPBACK_SEL_MASK; - data = 0x1 << HPIPE_LOOPBACK_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask); - /* rx control 1 */ - mask = HPIPE_RX_CONTROL_1_RXCLK2X_SEL_MASK; - data = 0x1 << HPIPE_RX_CONTROL_1_RXCLK2X_SEL_OFFSET; - mask |= HPIPE_RX_CONTROL_1_CLK8T_EN_MASK; - data |= 0x1 << HPIPE_RX_CONTROL_1_CLK8T_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_RX_CONTROL_1_REG, data, mask); - /* DTL Control */ - mask = HPIPE_PWR_CTR_DTL_FLOOP_EN_MASK; - data = 0x1 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask); - - /* Transmitter/Receiver Speed Divider Force */ - if (speed == PHY_SPEED_5_15625G) { - mask = HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_MASK; - data = 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_OFFSET; - mask |= HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_MASK; - data |= 1 << HPIPE_SPD_DIV_FORCE_RX_SPD_DIV_FORCE_OFFSET; - mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_MASK; - data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_OFFSET; - mask |= HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK; - data |= 1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET; - } else { - mask = HPIPE_TXDIGCK_DIV_FORCE_MASK; - data = 0x1 << HPIPE_TXDIGCK_DIV_FORCE_OFFSET; - } - reg_set(hpipe_addr + HPIPE_SPD_DIV_FORCE_REG, data, mask); - - /* Set analog paramters from ETP(HW) */ - debug("stage: Analog paramters from ETP(HW)\n"); - /* SERDES External Configuration 2 */ - mask = SD_EXTERNAL_CONFIG2_PIN_DFE_EN_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG2_PIN_DFE_EN_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG2_REG, data, mask); - /* 0x7-DFE Resolution control */ - mask = HPIPE_DFE_RES_FORCE_MASK; - data = 0x1 << HPIPE_DFE_RES_FORCE_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask); - /* 0xd-G1_Setting_0 */ - if (speed == PHY_SPEED_5_15625G) { - mask = HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; - data = 0x6 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; - } else { - mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK; - data = 0x1c << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET; - mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK; - data |= 0xe << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET; - } - reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask); - /* Genration 1 setting 2 (G1_Setting_2) */ - mask = HPIPE_G1_SET_2_G1_TX_EMPH0_MASK; - data = 0x0 << HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET; - mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK; - data |= 0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask); - /* Transmitter Slew Rate Control register (tx_reg1) */ - mask = HPIPE_TX_REG1_TX_EMPH_RES_MASK; - data = 0x3 << HPIPE_TX_REG1_TX_EMPH_RES_OFFSET; - mask |= HPIPE_TX_REG1_SLC_EN_MASK; - data |= 0x3f << HPIPE_TX_REG1_SLC_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_REG1_REG, data, mask); - /* Impedance Calibration Control register (cal_reg1) */ - mask = HPIPE_CAL_REG_1_EXT_TXIMP_MASK; - data = 0xe << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET; - mask |= HPIPE_CAL_REG_1_EXT_TXIMP_EN_MASK; - data |= 0x1 << HPIPE_CAL_REG_1_EXT_TXIMP_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_CAL_REG1_REG, data, mask); - /* Generation 1 Setting 5 
(g1_setting_5) */ - mask = HPIPE_G1_SETTING_5_G1_ICP_MASK; - data = 0 << HPIPE_CAL_REG_1_EXT_TXIMP_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SETTING_5_REG, data, mask); - /* 0xE-G1_Setting_1 */ - mask = HPIPE_G1_SET_1_G1_RX_DFE_EN_MASK; - data = 0x1 << HPIPE_G1_SET_1_G1_RX_DFE_EN_OFFSET; - if (speed == PHY_SPEED_5_15625G) { - mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; - data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK; - data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET; - } else { - mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK; - data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUPP_MASK; - data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPP_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK; - data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK; - data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET; - mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK; - data |= 0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET; - } - reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask); - - /* 0xA-DFE_Reg3 */ - mask = HPIPE_DFE_F3_F5_DFE_EN_MASK; - data = 0x0 << HPIPE_DFE_F3_F5_DFE_EN_OFFSET; - mask |= HPIPE_DFE_F3_F5_DFE_CTRL_MASK; - data |= 0x0 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET; - reg_set(hpipe_addr + HPIPE_DFE_F3_F5_REG, data, mask); - - /* 0x111-G1_Setting_4 */ - mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK; - data = 0x1 << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET; - reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask); - /* Genration 1 setting 3 (G1_Setting_3) */ - mask = HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_MASK; - data = 0x1 << HPIPE_G1_SETTINGS_3_G1_FBCK_SEL_OFFSET; - if (speed == PHY_SPEED_5_15625G) { - /* Force FFE (Feed Forward Equalization) to 5G */ - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK; - data |= 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK; - data |= 0x4 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET; - mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK; - data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET; - } - reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask); - - /* Connfigure RX training timer */ - mask = HPIPE_RX_TRAIN_TIMER_MASK; - data = 0x13 << HPIPE_RX_TRAIN_TIMER_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_5_REG, data, mask); - - /* Enable TX train peak to peak hold */ - mask = HPIPE_TX_TRAIN_P2P_HOLD_MASK; - data = 0x1 << HPIPE_TX_TRAIN_P2P_HOLD_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_CTRL_0_REG, data, mask); - - /* Configure TX preset index */ - mask = HPIPE_TX_PRESET_INDEX_MASK; - data = 0x2 << HPIPE_TX_PRESET_INDEX_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_PRESET_INDEX_REG, data, mask); - - /* Disable pattern lock lost timeout */ - mask = HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_MASK; - data = 0x0 << HPIPE_PATTERN_LOCK_LOST_TIMEOUT_EN_OFFSET; - reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_3_REG, data, mask); - - /* Configure TX training pattern and TX training 16bit auto */ - mask = HPIPE_TX_TRAIN_16BIT_AUTO_EN_MASK; - data = 0x1 << HPIPE_TX_TRAIN_16BIT_AUTO_EN_OFFSET; - mask |= HPIPE_TX_TRAIN_PAT_SEL_MASK; - data |= 0x1 << HPIPE_TX_TRAIN_PAT_SEL_OFFSET; - reg_set(hpipe_addr + HPIPE_TX_TRAIN_REG, data, mask); - - /* Configure Training patten number */ - mask = HPIPE_TRAIN_PAT_NUM_MASK; - data = 0x88 << HPIPE_TRAIN_PAT_NUM_OFFSET; - reg_set(hpipe_addr + HPIPE_FRAME_DETECT_CTRL_0_REG, data, mask); - - /* Configure differencial manchester encoter to ethernet mode */ - mask = 
HPIPE_DME_ETHERNET_MODE_MASK; - data = 0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET; - reg_set(hpipe_addr + HPIPE_DME_REG, data, mask); - - /* Configure VDD Continuous Calibration */ - mask = HPIPE_CAL_VDD_CONT_MODE_MASK; - data = 0x1 << HPIPE_CAL_VDD_CONT_MODE_OFFSET; - reg_set(hpipe_addr + HPIPE_VDD_CAL_0_REG, data, mask); - - /* Trigger sampler enable pulse (by toggleing the bit) */ - mask = HPIPE_RX_SAMPLER_OS_GAIN_MASK; - data = 0x3 << HPIPE_RX_SAMPLER_OS_GAIN_OFFSET; - mask |= HPIPE_SMAPLER_MASK; - data |= 0x1 << HPIPE_SMAPLER_OFFSET; - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); - mask = HPIPE_SMAPLER_MASK; - data = 0x0 << HPIPE_SMAPLER_OFFSET; - reg_set(hpipe_addr + HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG, data, mask); - - /* Set External RX Regulator Control */ - mask = HPIPE_EXT_SELLV_RXSAMPL_MASK; - data = 0x1A << HPIPE_EXT_SELLV_RXSAMPL_OFFSET; - reg_set(hpipe_addr + HPIPE_VDD_CAL_CTRL_REG, data, mask); - - debug("stage: RFU configurations- Power Up PLL,Tx,Rx\n"); - /* SERDES External Configuration */ - mask = SD_EXTERNAL_CONFIG0_SD_PU_PLL_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_PLL_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_RX_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_RX_OFFSET; - mask |= SD_EXTERNAL_CONFIG0_SD_PU_TX_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG0_SD_PU_TX_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG0_REG, data, mask); - - - /* check PLL rx & tx ready */ - addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; - data = SD_EXTERNAL_STATUS0_PLL_RX_MASK | - SD_EXTERNAL_STATUS0_PLL_TX_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 15000); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); - pr_err("SD_EXTERNAL_STATUS0_PLL_RX is %d, SD_EXTERNAL_STATUS0_PLL_TX is %d\n", - (data & SD_EXTERNAL_STATUS0_PLL_RX_MASK), - (data & SD_EXTERNAL_STATUS0_PLL_TX_MASK)); - ret = 0; - } - - /* RX init */ - mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; - data = 0x1 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); - - - /* check that RX init done */ - addr = sd_ip_addr + SD_EXTERNAL_STATUS0_REG; - data = SD_EXTERNAL_STATUS0_RX_INIT_MASK; - mask = data; - data = polling_with_timeout(addr, data, mask, 100); - if (data != 0) { - debug("Read from reg = %p - value = 0x%x\n", - sd_ip_addr + SD_EXTERNAL_STATUS0_REG, data); - pr_err("SD_EXTERNAL_STATUS0_RX_INIT is 0\n"); - ret = 0; - } - - debug("stage: RF Reset\n"); - /* RF Reset */ - mask = SD_EXTERNAL_CONFIG1_RX_INIT_MASK; - data = 0x0 << SD_EXTERNAL_CONFIG1_RX_INIT_OFFSET; - mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK; - data |= 0x1 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET; - reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask); + ret = comphy_smc(MV_SIP_COMPHY_PLL_LOCK, comphy_base_addr, lane, type); debug_exit(); return ret; @@ -1973,6 +880,7 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg, void __iomem *comphy_base_addr, *hpipe_base_addr; u32 comphy_max_count, lane, ret = 0; u32 pcie_width = 0; + u32 mode; debug_enter(); @@ -2011,19 +919,23 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg, case PHY_TYPE_PEX1: case PHY_TYPE_PEX2: case PHY_TYPE_PEX3: - ret = comphy_pcie_power_up( - lane, pcie_width, ptr_comphy_map->clk_src, - serdes_map->end_point, - hpipe_base_addr, comphy_base_addr); + mode = COMPHY_FW_PCIE_FORMAT(pcie_width, + ptr_comphy_map->clk_src, + COMPHY_PCIE_MODE, + ptr_comphy_map->speed); + ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, 
+ ptr_chip_cfg->comphy_base_addr, lane, + mode); break; case PHY_TYPE_SATA0: case PHY_TYPE_SATA1: case PHY_TYPE_SATA2: case PHY_TYPE_SATA3: - ret = comphy_sata_power_up( - lane, hpipe_base_addr, comphy_base_addr, - ptr_chip_cfg->cp_index, - serdes_map[lane].invert); + mode = COMPHY_FW_MODE_FORMAT(COMPHY_SATA_MODE); + ret = comphy_sata_power_up(lane, hpipe_base_addr, + comphy_base_addr, + ptr_chip_cfg->cp_index, + mode); break; case PHY_TYPE_USB3_HOST0: case PHY_TYPE_USB3_HOST1: @@ -2033,6 +945,25 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg, break; case PHY_TYPE_SGMII0: case PHY_TYPE_SGMII1: + if (ptr_comphy_map->speed == PHY_SPEED_INVALID) { + debug("Warning: "); + debug("SGMII PHY speed in lane %d is invalid,", + lane); + debug(" set PHY speed to 1.25G\n"); + ptr_comphy_map->speed = PHY_SPEED_1_25G; + } + + /* + * UINIT_ID not relevant for SGMII0 and SGMII1 - will be + * ignored by firmware + */ + mode = COMPHY_FW_FORMAT(COMPHY_SGMII_MODE, + COMPHY_UNIT_ID0, + ptr_comphy_map->speed); + ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, + ptr_chip_cfg->comphy_base_addr, lane, + mode); + break; case PHY_TYPE_SGMII2: case PHY_TYPE_SGMII3: if (ptr_comphy_map->speed == PHY_SPEED_INVALID) { @@ -2040,14 +971,21 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg, lane); ptr_comphy_map->speed = PHY_SPEED_1_25G; } - ret = comphy_sgmii_power_up( - lane, ptr_comphy_map->speed, hpipe_base_addr, - comphy_base_addr); + + mode = COMPHY_FW_FORMAT(COMPHY_SGMII_MODE, + COMPHY_UNIT_ID2, + ptr_comphy_map->speed); + ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, + ptr_chip_cfg->comphy_base_addr, lane, + mode); break; case PHY_TYPE_SFI: - ret = comphy_sfi_power_up(lane, hpipe_base_addr, - comphy_base_addr, - ptr_comphy_map->speed); + mode = COMPHY_FW_FORMAT(COMPHY_SFI_MODE, + COMPHY_UNIT_ID0, + ptr_comphy_map->speed); + ret = comphy_smc(MV_SIP_COMPHY_POWER_ON, + ptr_chip_cfg->comphy_base_addr, lane, + mode); break; case PHY_TYPE_RXAUI0: case PHY_TYPE_RXAUI1: diff --git a/drivers/phy/nop-phy.c b/drivers/phy/nop-phy.c index a5eed20f3f9..ba71785fe42 100644 --- a/drivers/phy/nop-phy.c +++ b/drivers/phy/nop-phy.c @@ -4,17 +4,50 @@ * Written by Jean-Jacques Hiblot <jjhiblot@ti.com> */ +#include <clk.h> #include <common.h> #include <dm.h> #include <dm/device.h> +#include <dm/device_compat.h> #include <generic-phy.h> +struct nop_phy_priv { + struct clk_bulk bulk; +}; + +static int nop_phy_init(struct phy *phy) +{ + struct nop_phy_priv *priv = dev_get_priv(phy->dev); + + if (CONFIG_IS_ENABLED(CLK)) + return clk_enable_bulk(&priv->bulk); + + return 0; +} + +static int nop_phy_probe(struct udevice *dev) +{ + struct nop_phy_priv *priv = dev_get_priv(dev); + int ret; + + if (CONFIG_IS_ENABLED(CLK)) { + ret = clk_get_bulk(dev, &priv->bulk); + if (ret < 0) { + dev_err(dev, "Failed to get clk: %d\n", ret); + return ret; + } + } + + return 0; +} + static const struct udevice_id nop_phy_ids[] = { { .compatible = "nop-phy" }, { } }; static struct phy_ops nop_phy_ops = { + .init = nop_phy_init, }; U_BOOT_DRIVER(nop_phy) = { @@ -22,4 +55,6 @@ U_BOOT_DRIVER(nop_phy) = { .id = UCLASS_PHY, .of_match = nop_phy_ids, .ops = &nop_phy_ops, + .probe = nop_phy_probe, + .priv_auto_alloc_size = sizeof(struct nop_phy_priv), }; diff --git a/drivers/phy/omap-usb2-phy.c b/drivers/phy/omap-usb2-phy.c index adc454ddd48..a981cb2f8d6 100644 --- a/drivers/phy/omap-usb2-phy.c +++ b/drivers/phy/omap-usb2-phy.c @@ -12,6 +12,7 @@ #include <errno.h> #include <generic-phy.h> #include <regmap.h> +#include <soc.h> #include <syscon.h> 
#include <linux/bitops.h> #include <linux/err.h> @@ -196,6 +197,11 @@ struct phy_ops omap_usb2_phy_ops = { .exit = omap_usb2_phy_exit, }; +static const struct soc_attr am65x_sr10_soc_devices[] = { + { .family = "AM65X", .revision = "SR1.0" }, + { /* sentinel */ } +}; + int omap_usb2_phy_probe(struct udevice *dev) { int rc; @@ -222,10 +228,9 @@ int omap_usb2_phy_probe(struct udevice *dev) * Disabling the USB2_PHY Charger Detect function will put D+ * into the normal state. * - * Using property "ti,dis-chg-det-quirk" in the DT usb2-phy node - * to enable this workaround for AM654x PG1.0. + * Enable this workaround for AM654x PG1.0. */ - if (dev_read_bool(dev, "ti,dis-chg-det-quirk")) + if (soc_device_match(am65x_sr10_soc_devices)) priv->flags |= OMAP_USB2_DISABLE_CHG_DET; regmap = syscon_regmap_lookup_by_phandle(dev, "syscon-phy-power"); diff --git a/drivers/phy/phy-uclass.c b/drivers/phy/phy-uclass.c index f344e94b438..ef03e3a5025 100644 --- a/drivers/phy/phy-uclass.c +++ b/drivers/phy/phy-uclass.c @@ -6,9 +6,9 @@ #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <dm/devres.h> #include <generic-phy.h> -#include <log.h> static inline struct phy_ops *phy_dev_ops(struct udevice *dev) { diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 048583f39b0..77fb8511144 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -291,6 +291,13 @@ config ASPEED_AST2500_PINCTRL uses Generic Pinctrl framework and is compatible with the Linux driver, i.e. it uses the same device tree configuration. +config PINCTRL_K210 + bool "Kendryte K210 Fully-Programmable Input/Output Array driver" + depends on DM && PINCTRL_GENERIC + help + Support pin multiplexing on the K210. The "FPIOA" can remap any + supported function to any multifunctional IO pin. It can also perform + basic GPIO functions, such as reading the current value of a pin. 
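The K210 driver hooks into the generic pinctrl framework through the pinmux device-tree property, whose plumbing is added to pinctrl-generic.c below: each u32 cell of the property packs a pin selector together with its mux function, and the driver's pinmux_property_set() op decodes one cell, programs the mux, and returns the pin selector so that pinconf properties from the same config node can then be applied to that pin. A minimal sketch of such an op, assuming a hypothetical cell layout (the real K210 encoding uses the K210_PG_* fields defined in pinctrl-kendryte.c further down):

#include <common.h>
#include <dm.h>
#include <dm/pinctrl.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>

/* Hypothetical cell layout: function in bits [7:0], pin in bits [22:16] */
#define EX_PG_FUNC	GENMASK(7, 0)
#define EX_PG_PIN	GENMASK(22, 16)

static int example_pinmux_property_set(struct udevice *dev, u32 pinmux_group)
{
	unsigned int pin = FIELD_GET(EX_PG_PIN, pinmux_group);
	unsigned int func = FIELD_GET(EX_PG_FUNC, pinmux_group);

	/* ...program the mux register here so 'pin' carries 'func'... */

	/*
	 * Return the pin selector (>= 0); the generic code passes it to
	 * pinctrl_generic_set_state_one() to apply any pinconf properties
	 * found in the same config node.
	 */
	return pin;
}

A board device tree then lists one such cell per pin in a pinmux = <...> property instead of the pins/groups string lists handled by the existing code paths.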
endif source "drivers/pinctrl/broadcom/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 507dd3a926c..05b71f2f134 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_PINCTRL_SANDBOX) += pinctrl-sandbox.o obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl_pic32.o obj-$(CONFIG_PINCTRL_EXYNOS) += exynos/ +obj-$(CONFIG_PINCTRL_K210) += pinctrl-kendryte.o obj-$(CONFIG_PINCTRL_MESON) += meson/ obj-$(CONFIG_PINCTRL_MTK) += mediatek/ obj-$(CONFIG_PINCTRL_MSCC) += mscc/ diff --git a/drivers/pinctrl/pinctrl-generic.c b/drivers/pinctrl/pinctrl-generic.c index 313aeccb1ee..3c8e24088ce 100644 --- a/drivers/pinctrl/pinctrl-generic.c +++ b/drivers/pinctrl/pinctrl-generic.c @@ -227,6 +227,13 @@ static int pinconf_enable_setting(struct udevice *dev, bool is_group, } #endif +enum pinmux_subnode_type { + PST_NONE = 0, + PST_PIN, + PST_GROUP, + PST_PINMUX, +}; + /** * pinctrl_generic_set_state_one() - set state for a certain pin/group * Apply all pin multiplexing and pin configurations specified by @config @@ -234,13 +241,15 @@ static int pinconf_enable_setting(struct udevice *dev, bool is_group, * * @dev: pin controller device * @config: pseudo device pointing to config node - * @is_group: target of operation (true: pin group, false: pin) - * @selector: pin selector or group selector, depending on @is_group + * @subnode_type: target of operation (pin, group, or pin specified by a pinmux + * group) + * @selector: pin selector or group selector, depending on @subnode_type * @return: 0 on success, or negative error code on failure */ static int pinctrl_generic_set_state_one(struct udevice *dev, struct udevice *config, - bool is_group, unsigned selector) + enum pinmux_subnode_type subnode_type, + unsigned selector) { const char *propname; const void *value; @@ -248,17 +257,22 @@ static int pinctrl_generic_set_state_one(struct udevice *dev, int len, func_selector, param, ret; u32 arg, default_val; + assert(subnode_type != PST_NONE); + dev_for_each_property(property, config) { value = dev_read_prop_by_prop(&property, &propname, &len); if (!value) return -EINVAL; - if (!strcmp(propname, "function")) { + /* pinmux subnodes already have their muxing set */ + if (subnode_type != PST_PINMUX && + !strcmp(propname, "function")) { func_selector = pinmux_func_name_to_selector(dev, value); if (func_selector < 0) return func_selector; - ret = pinmux_enable_setting(dev, is_group, + ret = pinmux_enable_setting(dev, + subnode_type == PST_GROUP, selector, func_selector); } else { @@ -272,7 +286,8 @@ static int pinctrl_generic_set_state_one(struct udevice *dev, else arg = default_val; - ret = pinconf_enable_setting(dev, is_group, + ret = pinconf_enable_setting(dev, + subnode_type == PST_GROUP, selector, param, arg); } @@ -284,6 +299,41 @@ static int pinctrl_generic_set_state_one(struct udevice *dev, } /** + * pinctrl_generic_get_subnode_type() - determine whether there is a valid + * pins, groups, or pinmux property in the config node + * + * @dev: pin controller device + * @config: pseudo device pointing to config node + * @count: number of specifiers contained within the property + * @return: the type of the subnode, or PST_NONE + */ +static enum pinmux_subnode_type pinctrl_generic_get_subnode_type(struct udevice *dev, + struct udevice *config, + int *count) +{ + const struct pinctrl_ops *ops = pinctrl_get_ops(dev); + + *count = dev_read_string_count(config, "pins"); + if (*count >= 0) + return PST_PIN; + + *count = 
dev_read_string_count(config, "groups"); + if (*count >= 0) + return PST_GROUP; + + if (ops->pinmux_property_set) { + *count = dev_read_size(config, "pinmux"); + if (*count >= 0 && !(*count % sizeof(u32))) { + *count /= sizeof(u32); + return PST_PINMUX; + } + } + + *count = 0; + return PST_NONE; +} + +/** * pinctrl_generic_set_state_subnode() - apply all settings in config node * * @dev: pin controller device @@ -293,38 +343,55 @@ static int pinctrl_generic_set_state_one(struct udevice *dev, static int pinctrl_generic_set_state_subnode(struct udevice *dev, struct udevice *config) { - const char *subnode_target_type = "pins"; - bool is_group = false; + enum pinmux_subnode_type subnode_type; const char *name; - int strings_count, selector, i, ret; - - strings_count = dev_read_string_count(config, subnode_target_type); - if (strings_count < 0) { - subnode_target_type = "groups"; - is_group = true; - strings_count = dev_read_string_count(config, - subnode_target_type); - if (strings_count < 0) { + int count, selector, i, ret, scratch; + const u32 *pinmux_groups = NULL; /* prevent use-uninitialized warning */ + + subnode_type = pinctrl_generic_get_subnode_type(dev, config, &count); + + debug("%s(%s, %s): count=%d\n", __func__, dev->name, config->name, + count); + + if (subnode_type == PST_PINMUX) { + pinmux_groups = dev_read_prop(config, "pinmux", &scratch); + if (!pinmux_groups) + return -EINVAL; + } + + for (i = 0; i < count; i++) { + switch (subnode_type) { + case PST_PIN: + ret = dev_read_string_index(config, "pins", i, &name); + if (ret) + return ret; + selector = pinctrl_pin_name_to_selector(dev, name); + break; + case PST_GROUP: + ret = dev_read_string_index(config, "groups", i, &name); + if (ret) + return ret; + selector = pinctrl_group_name_to_selector(dev, name); + break; + case PST_PINMUX: { + const struct pinctrl_ops *ops = pinctrl_get_ops(dev); + u32 pinmux_group = fdt32_to_cpu(pinmux_groups[i]); + + /* Checked for in pinctrl_generic_get_subnode_type */ + selector = ops->pinmux_property_set(dev, pinmux_group); + break; + } + case PST_NONE: + default: /* skip this node; may contain config child nodes */ return 0; } - } - - for (i = 0; i < strings_count; i++) { - ret = dev_read_string_index(config, subnode_target_type, i, - &name); - if (ret) - return ret; - if (is_group) - selector = pinctrl_group_name_to_selector(dev, name); - else - selector = pinctrl_pin_name_to_selector(dev, name); if (selector < 0) return selector; - ret = pinctrl_generic_set_state_one(dev, config, - is_group, selector); + ret = pinctrl_generic_set_state_one(dev, config, subnode_type, + selector); if (ret) return ret; } diff --git a/drivers/pinctrl/pinctrl-kendryte.c b/drivers/pinctrl/pinctrl-kendryte.c new file mode 100644 index 00000000000..5ad049d9555 --- /dev/null +++ b/drivers/pinctrl/pinctrl-kendryte.c @@ -0,0 +1,737 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com> + */ + +#include <common.h> +#include <clk.h> +#include <dm.h> +#include <dm/pinctrl.h> +#include <dt-bindings/pinctrl/k210-pinctrl.h> +#include <mapmem.h> +#include <regmap.h> +#include <syscon.h> +#include <asm/io.h> +#include <linux/err.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> + +/* + * The K210 only implements 8 drive levels, even though there is register space + * for 16 + */ +#define K210_PC_DRIVE_MASK GENMASK(11, 8) +#define K210_PC_DRIVE_SHIFT 8 +#define K210_PC_DRIVE_0 (0 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_1 (1 << K210_PC_DRIVE_SHIFT) +#define 
K210_PC_DRIVE_2 (2 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_3 (3 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_4 (4 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_5 (5 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_6 (6 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_7 (7 << K210_PC_DRIVE_SHIFT) +#define K210_PC_DRIVE_MAX 7 + +#define K210_PC_MODE_MASK GENMASK(23, 12) +/* + * output enabled == PC_OE & (PC_OE_INV ^ FUNCTION_OE) where FUNCTION_OE is a + * physical signal from the function + */ +#define K210_PC_OE BIT(12) /* Output Enable */ +#define K210_PC_OE_INV BIT(13) /* INVert function-controlled Output Enable */ +#define K210_PC_DO_OE BIT(14) /* set Data Out to the Output Enable signal */ +#define K210_PC_DO_INV BIT(15) /* INVert final Data Output */ +#define K210_PC_PU BIT(16) /* Pull Up */ +#define K210_PC_PD BIT(17) /* Pull Down */ +/* Strong pull up not implemented on K210 */ +#define K210_PC_SL BIT(19) /* reduce SLew rate to prevent overshoot */ +/* Same semantics as OE above */ +#define K210_PC_IE BIT(20) /* Input Enable */ +#define K210_PC_IE_INV BIT(21) /* INVert function-controlled Input Enable */ +#define K210_PC_DI_INV BIT(22) /* INVert Data Input */ +#define K210_PC_ST BIT(23) /* Schmitt Trigger */ +#define K210_PC_DI BIT(31) /* raw Data Input */ +#define K210_PC_BIAS_MASK (K210_PC_PU & K210_PC_PD) + +#define K210_PC_MODE_IN (K210_PC_IE | K210_PC_ST) +#define K210_PC_MODE_OUT (K210_PC_DRIVE_7 | K210_PC_OE) +#define K210_PC_MODE_I2C (K210_PC_MODE_IN | K210_PC_IE_INV | K210_PC_SL | \ + K210_PC_OE | K210_PC_OE_INV | K210_PC_PU) +#define K210_PC_MODE_SPI (K210_PC_MODE_IN | K210_PC_IE_INV | \ + K210_PC_MODE_OUT | K210_PC_OE_INV) +#define K210_PC_MODE_GPIO (K210_PC_MODE_IN | K210_PC_MODE_OUT) + +#define K210_PG_FUNC GENMASK(7, 0) +#define K210_PG_DO BIT(8) +#define K210_PG_PIN GENMASK(22, 16) + +#define PIN_CONFIG_OUTPUT_INVERT (PIN_CONFIG_END + 1) +#define PIN_CONFIG_INPUT_INVERT (PIN_CONFIG_END + 2) + +struct k210_fpioa { + u32 pins[48]; + u32 tie_en[8]; + u32 tie_val[8]; +}; + +struct k210_pc_priv { + struct clk clk; + struct k210_fpioa __iomem *fpioa; /* FPIOA register */ + struct regmap *sysctl; /* Sysctl regmap */ + u32 power_offset; /* Power bank register offset */ +}; + +#ifdef CONFIG_CMD_PINMUX +static const char k210_pc_pin_names[][6] = { +#define PIN(i) \ + [i] = "IO_" #i + PIN(0), + PIN(1), + PIN(2), + PIN(3), + PIN(4), + PIN(5), + PIN(6), + PIN(7), + PIN(8), + PIN(9), + PIN(10), + PIN(11), + PIN(12), + PIN(13), + PIN(14), + PIN(15), + PIN(16), + PIN(17), + PIN(18), + PIN(19), + PIN(20), + PIN(21), + PIN(22), + PIN(23), + PIN(24), + PIN(25), + PIN(26), + PIN(27), + PIN(28), + PIN(29), + PIN(30), + PIN(31), + PIN(32), + PIN(33), + PIN(34), + PIN(35), + PIN(36), + PIN(37), + PIN(38), + PIN(39), + PIN(40), + PIN(41), + PIN(42), + PIN(43), + PIN(44), + PIN(45), + PIN(46), + PIN(47), +#undef PIN +}; + +static int k210_pc_get_pins_count(struct udevice *dev) +{ + return ARRAY_SIZE(k210_pc_pin_names); +}; + +static const char *k210_pc_get_pin_name(struct udevice *dev, unsigned selector) +{ + return k210_pc_pin_names[selector]; +} +#endif /* CONFIG_CMD_PINMUX */ + +/* These are just power domains */ +static const char k210_pc_group_names[][3] = { + [0] = "A0", + [1] = "A1", + [2] = "A2", + [3] = "B0", + [4] = "B1", + [5] = "B2", + [6] = "C0", + [7] = "C1", +}; + +static int k210_pc_get_groups_count(struct udevice *dev) +{ + return ARRAY_SIZE(k210_pc_group_names); +} + +static const char *k210_pc_get_group_name(struct udevice *dev, + unsigned selector) +{ + return 
k210_pc_group_names[selector]; +} + +enum k210_pc_mode_id { + K210_PC_DEFAULT_DISABLED, + K210_PC_DEFAULT_IN, + K210_PC_DEFAULT_IN_TIE, + K210_PC_DEFAULT_OUT, + K210_PC_DEFAULT_I2C, + K210_PC_DEFAULT_SPI, + K210_PC_DEFAULT_GPIO, + K210_PC_DEFAULT_INT13, +}; + +static const u32 k210_pc_mode_id_to_mode[] = { +#define DEFAULT(mode) \ + [K210_PC_DEFAULT_##mode] = K210_PC_MODE_##mode + [K210_PC_DEFAULT_DISABLED] = 0, + DEFAULT(IN), + [K210_PC_DEFAULT_IN_TIE] = K210_PC_MODE_IN, + DEFAULT(OUT), + DEFAULT(I2C), + DEFAULT(SPI), + DEFAULT(GPIO), + [K210_PC_DEFAULT_INT13] = K210_PC_MODE_IN | K210_PC_PU, +#undef DEFAULT +}; + +/* This saves around 2K vs having a pointer+mode */ +struct k210_pcf_info { +#ifdef CONFIG_CMD_PINMUX + char name[15]; +#endif + u8 mode_id; +}; + +static const struct k210_pcf_info k210_pcf_infos[] = { +#ifdef CONFIG_CMD_PINMUX +#define FUNC(id, mode) \ + [K210_PCF_##id] = { \ + .name = #id, \ + .mode_id = K210_PC_DEFAULT_##mode \ + } +#else +#define FUNC(id, mode) \ + [K210_PCF_##id] = { \ + .mode_id = K210_PC_DEFAULT_##mode \ + } +#endif + FUNC(JTAG_TCLK, IN), + FUNC(JTAG_TDI, IN), + FUNC(JTAG_TMS, IN), + FUNC(JTAG_TDO, OUT), + FUNC(SPI0_D0, SPI), + FUNC(SPI0_D1, SPI), + FUNC(SPI0_D2, SPI), + FUNC(SPI0_D3, SPI), + FUNC(SPI0_D4, SPI), + FUNC(SPI0_D5, SPI), + FUNC(SPI0_D6, SPI), + FUNC(SPI0_D7, SPI), + FUNC(SPI0_SS0, OUT), + FUNC(SPI0_SS1, OUT), + FUNC(SPI0_SS2, OUT), + FUNC(SPI0_SS3, OUT), + FUNC(SPI0_ARB, IN_TIE), + FUNC(SPI0_SCLK, OUT), + FUNC(UARTHS_RX, IN), + FUNC(UARTHS_TX, OUT), + FUNC(RESV6, IN), + FUNC(RESV7, IN), + FUNC(CLK_SPI1, OUT), + FUNC(CLK_I2C1, OUT), + FUNC(GPIOHS0, GPIO), + FUNC(GPIOHS1, GPIO), + FUNC(GPIOHS2, GPIO), + FUNC(GPIOHS3, GPIO), + FUNC(GPIOHS4, GPIO), + FUNC(GPIOHS5, GPIO), + FUNC(GPIOHS6, GPIO), + FUNC(GPIOHS7, GPIO), + FUNC(GPIOHS8, GPIO), + FUNC(GPIOHS9, GPIO), + FUNC(GPIOHS10, GPIO), + FUNC(GPIOHS11, GPIO), + FUNC(GPIOHS12, GPIO), + FUNC(GPIOHS13, GPIO), + FUNC(GPIOHS14, GPIO), + FUNC(GPIOHS15, GPIO), + FUNC(GPIOHS16, GPIO), + FUNC(GPIOHS17, GPIO), + FUNC(GPIOHS18, GPIO), + FUNC(GPIOHS19, GPIO), + FUNC(GPIOHS20, GPIO), + FUNC(GPIOHS21, GPIO), + FUNC(GPIOHS22, GPIO), + FUNC(GPIOHS23, GPIO), + FUNC(GPIOHS24, GPIO), + FUNC(GPIOHS25, GPIO), + FUNC(GPIOHS26, GPIO), + FUNC(GPIOHS27, GPIO), + FUNC(GPIOHS28, GPIO), + FUNC(GPIOHS29, GPIO), + FUNC(GPIOHS30, GPIO), + FUNC(GPIOHS31, GPIO), + FUNC(GPIO0, GPIO), + FUNC(GPIO1, GPIO), + FUNC(GPIO2, GPIO), + FUNC(GPIO3, GPIO), + FUNC(GPIO4, GPIO), + FUNC(GPIO5, GPIO), + FUNC(GPIO6, GPIO), + FUNC(GPIO7, GPIO), + FUNC(UART1_RX, IN), + FUNC(UART1_TX, OUT), + FUNC(UART2_RX, IN), + FUNC(UART2_TX, OUT), + FUNC(UART3_RX, IN), + FUNC(UART3_TX, OUT), + FUNC(SPI1_D0, SPI), + FUNC(SPI1_D1, SPI), + FUNC(SPI1_D2, SPI), + FUNC(SPI1_D3, SPI), + FUNC(SPI1_D4, SPI), + FUNC(SPI1_D5, SPI), + FUNC(SPI1_D6, SPI), + FUNC(SPI1_D7, SPI), + FUNC(SPI1_SS0, OUT), + FUNC(SPI1_SS1, OUT), + FUNC(SPI1_SS2, OUT), + FUNC(SPI1_SS3, OUT), + FUNC(SPI1_ARB, IN_TIE), + FUNC(SPI1_SCLK, OUT), + FUNC(SPI2_D0, SPI), + FUNC(SPI2_SS, IN), + FUNC(SPI2_SCLK, IN), + FUNC(I2S0_MCLK, OUT), + FUNC(I2S0_SCLK, OUT), + FUNC(I2S0_WS, OUT), + FUNC(I2S0_IN_D0, IN), + FUNC(I2S0_IN_D1, IN), + FUNC(I2S0_IN_D2, IN), + FUNC(I2S0_IN_D3, IN), + FUNC(I2S0_OUT_D0, OUT), + FUNC(I2S0_OUT_D1, OUT), + FUNC(I2S0_OUT_D2, OUT), + FUNC(I2S0_OUT_D3, OUT), + FUNC(I2S1_MCLK, OUT), + FUNC(I2S1_SCLK, OUT), + FUNC(I2S1_WS, OUT), + FUNC(I2S1_IN_D0, IN), + FUNC(I2S1_IN_D1, IN), + FUNC(I2S1_IN_D2, IN), + FUNC(I2S1_IN_D3, IN), + FUNC(I2S1_OUT_D0, OUT), + FUNC(I2S1_OUT_D1, OUT), + 
FUNC(I2S1_OUT_D2, OUT), + FUNC(I2S1_OUT_D3, OUT), + FUNC(I2S2_MCLK, OUT), + FUNC(I2S2_SCLK, OUT), + FUNC(I2S2_WS, OUT), + FUNC(I2S2_IN_D0, IN), + FUNC(I2S2_IN_D1, IN), + FUNC(I2S2_IN_D2, IN), + FUNC(I2S2_IN_D3, IN), + FUNC(I2S2_OUT_D0, OUT), + FUNC(I2S2_OUT_D1, OUT), + FUNC(I2S2_OUT_D2, OUT), + FUNC(I2S2_OUT_D3, OUT), + FUNC(RESV0, DISABLED), + FUNC(RESV1, DISABLED), + FUNC(RESV2, DISABLED), + FUNC(RESV3, DISABLED), + FUNC(RESV4, DISABLED), + FUNC(RESV5, DISABLED), + FUNC(I2C0_SCLK, I2C), + FUNC(I2C0_SDA, I2C), + FUNC(I2C1_SCLK, I2C), + FUNC(I2C1_SDA, I2C), + FUNC(I2C2_SCLK, I2C), + FUNC(I2C2_SDA, I2C), + FUNC(DVP_XCLK, OUT), + FUNC(DVP_RST, OUT), + FUNC(DVP_PWDN, OUT), + FUNC(DVP_VSYNC, IN), + FUNC(DVP_HSYNC, IN), + FUNC(DVP_PCLK, IN), + FUNC(DVP_D0, IN), + FUNC(DVP_D1, IN), + FUNC(DVP_D2, IN), + FUNC(DVP_D3, IN), + FUNC(DVP_D4, IN), + FUNC(DVP_D5, IN), + FUNC(DVP_D6, IN), + FUNC(DVP_D7, IN), + FUNC(SCCB_SCLK, I2C), + FUNC(SCCB_SDA, I2C), + FUNC(UART1_CTS, IN), + FUNC(UART1_DSR, IN), + FUNC(UART1_DCD, IN), + FUNC(UART1_RI, IN), + FUNC(UART1_SIR_IN, IN), + FUNC(UART1_DTR, OUT), + FUNC(UART1_RTS, OUT), + FUNC(UART1_OUT2, OUT), + FUNC(UART1_OUT1, OUT), + FUNC(UART1_SIR_OUT, OUT), + FUNC(UART1_BAUD, OUT), + FUNC(UART1_RE, OUT), + FUNC(UART1_DE, OUT), + FUNC(UART1_RS485_EN, OUT), + FUNC(UART2_CTS, IN), + FUNC(UART2_DSR, IN), + FUNC(UART2_DCD, IN), + FUNC(UART2_RI, IN), + FUNC(UART2_SIR_IN, IN), + FUNC(UART2_DTR, OUT), + FUNC(UART2_RTS, OUT), + FUNC(UART2_OUT2, OUT), + FUNC(UART2_OUT1, OUT), + FUNC(UART2_SIR_OUT, OUT), + FUNC(UART2_BAUD, OUT), + FUNC(UART2_RE, OUT), + FUNC(UART2_DE, OUT), + FUNC(UART2_RS485_EN, OUT), + FUNC(UART3_CTS, IN), + FUNC(UART3_DSR, IN), + FUNC(UART3_DCD, IN), + FUNC(UART3_RI, IN), + FUNC(UART3_SIR_IN, IN), + FUNC(UART3_DTR, OUT), + FUNC(UART3_RTS, OUT), + FUNC(UART3_OUT2, OUT), + FUNC(UART3_OUT1, OUT), + FUNC(UART3_SIR_OUT, OUT), + FUNC(UART3_BAUD, OUT), + FUNC(UART3_RE, OUT), + FUNC(UART3_DE, OUT), + FUNC(UART3_RS485_EN, OUT), + FUNC(TIMER0_TOGGLE1, OUT), + FUNC(TIMER0_TOGGLE2, OUT), + FUNC(TIMER0_TOGGLE3, OUT), + FUNC(TIMER0_TOGGLE4, OUT), + FUNC(TIMER1_TOGGLE1, OUT), + FUNC(TIMER1_TOGGLE2, OUT), + FUNC(TIMER1_TOGGLE3, OUT), + FUNC(TIMER1_TOGGLE4, OUT), + FUNC(TIMER2_TOGGLE1, OUT), + FUNC(TIMER2_TOGGLE2, OUT), + FUNC(TIMER2_TOGGLE3, OUT), + FUNC(TIMER2_TOGGLE4, OUT), + FUNC(CLK_SPI2, OUT), + FUNC(CLK_I2C2, OUT), + FUNC(INTERNAL0, OUT), + FUNC(INTERNAL1, OUT), + FUNC(INTERNAL2, OUT), + FUNC(INTERNAL3, OUT), + FUNC(INTERNAL4, OUT), + FUNC(INTERNAL5, OUT), + FUNC(INTERNAL6, OUT), + FUNC(INTERNAL7, OUT), + FUNC(INTERNAL8, OUT), + FUNC(INTERNAL9, IN), + FUNC(INTERNAL10, IN), + FUNC(INTERNAL11, IN), + FUNC(INTERNAL12, IN), + FUNC(INTERNAL13, INT13), + FUNC(INTERNAL14, I2C), + FUNC(INTERNAL15, IN), + FUNC(INTERNAL16, IN), + FUNC(INTERNAL17, IN), + FUNC(CONSTANT, DISABLED), + FUNC(INTERNAL18, IN), + FUNC(DEBUG0, OUT), + FUNC(DEBUG1, OUT), + FUNC(DEBUG2, OUT), + FUNC(DEBUG3, OUT), + FUNC(DEBUG4, OUT), + FUNC(DEBUG5, OUT), + FUNC(DEBUG6, OUT), + FUNC(DEBUG7, OUT), + FUNC(DEBUG8, OUT), + FUNC(DEBUG9, OUT), + FUNC(DEBUG10, OUT), + FUNC(DEBUG11, OUT), + FUNC(DEBUG12, OUT), + FUNC(DEBUG13, OUT), + FUNC(DEBUG14, OUT), + FUNC(DEBUG15, OUT), + FUNC(DEBUG16, OUT), + FUNC(DEBUG17, OUT), + FUNC(DEBUG18, OUT), + FUNC(DEBUG19, OUT), + FUNC(DEBUG20, OUT), + FUNC(DEBUG21, OUT), + FUNC(DEBUG22, OUT), + FUNC(DEBUG23, OUT), + FUNC(DEBUG24, OUT), + FUNC(DEBUG25, OUT), + FUNC(DEBUG26, OUT), + FUNC(DEBUG27, OUT), + FUNC(DEBUG28, OUT), + FUNC(DEBUG29, OUT), + FUNC(DEBUG30, OUT), + FUNC(DEBUG31, 
OUT), +#undef FUNC +}; + +static int k210_pc_pinmux_set(struct udevice *dev, u32 pinmux_group) +{ + unsigned pin = FIELD_GET(K210_PG_PIN, pinmux_group); + bool do_oe = FIELD_GET(K210_PG_DO, pinmux_group); + unsigned func = FIELD_GET(K210_PG_FUNC, pinmux_group); + struct k210_pc_priv *priv = dev_get_priv(dev); + const struct k210_pcf_info *info = &k210_pcf_infos[func]; + u32 mode = k210_pc_mode_id_to_mode[info->mode_id]; + u32 val = func | mode | (do_oe ? K210_PC_DO_OE : 0); + + debug("%s(%.8x): IO_%.2u = %3u | %.8x\n", __func__, pinmux_group, pin, + func, mode); + + writel(val, &priv->fpioa->pins[pin]); + return pin; +} + +/* Max drive strength in uA */ +static const int k210_pc_drive_strength[] = { + [0] = 11200, + [1] = 16800, + [2] = 22300, + [3] = 27800, + [4] = 33300, + [5] = 38700, + [6] = 44100, + [7] = 49500, +}; + +static int k210_pc_get_drive(unsigned max_strength_ua) +{ + int i; + + for (i = K210_PC_DRIVE_MAX; i; i--) + if (k210_pc_drive_strength[i] < max_strength_ua) + return i; + + return -EINVAL; +} + +static int k210_pc_pinconf_set(struct udevice *dev, unsigned pin_selector, + unsigned param, unsigned argument) +{ + struct k210_pc_priv *priv = dev_get_priv(dev); + u32 val = readl(&priv->fpioa->pins[pin_selector]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + val &= ~K210_PC_BIAS_MASK; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + if (argument) + val |= K210_PC_PD; + else + return -EINVAL; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (argument) + val |= K210_PC_PD; + else + return -EINVAL; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + argument *= 1000; + case PIN_CONFIG_DRIVE_STRENGTH_UA: { + int drive = k210_pc_get_drive(argument); + + if (IS_ERR_VALUE(drive)) + return drive; + val &= ~K210_PC_DRIVE_MASK; + val |= FIELD_PREP(K210_PC_DRIVE_MASK, drive); + break; + } + case PIN_CONFIG_INPUT_ENABLE: + if (argument) + val |= K210_PC_IE; + else + val &= ~K210_PC_IE; + break; + case PIN_CONFIG_INPUT_SCHMITT: + argument = 1; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (argument) + val |= K210_PC_ST; + else + val &= ~K210_PC_ST; + break; + case PIN_CONFIG_OUTPUT: + k210_pc_pinmux_set(dev, + K210_FPIOA(pin_selector, K210_PCF_CONSTANT)); + val = readl(&priv->fpioa->pins[pin_selector]); + val |= K210_PC_MODE_OUT; + + if (!argument) + val |= K210_PC_DO_INV; + break; + case PIN_CONFIG_OUTPUT_ENABLE: + if (argument) + val |= K210_PC_OE; + else + val &= ~K210_PC_OE; + break; + case PIN_CONFIG_SLEW_RATE: + if (argument) + val |= K210_PC_SL; + else + val &= ~K210_PC_SL; + break; + case PIN_CONFIG_OUTPUT_INVERT: + if (argument) + val |= K210_PC_DO_INV; + else + val &= ~K210_PC_DO_INV; + break; + case PIN_CONFIG_INPUT_INVERT: + if (argument) + val |= K210_PC_DI_INV; + else + val &= ~K210_PC_DI_INV; + break; + default: + return -EINVAL; + } + + writel(val, &priv->fpioa->pins[pin_selector]); + return 0; +} + +static int k210_pc_pinconf_group_set(struct udevice *dev, + unsigned group_selector, unsigned param, + unsigned argument) +{ + struct k210_pc_priv *priv = dev_get_priv(dev); + + if (param == PIN_CONFIG_POWER_SOURCE) { + u32 bit = BIT(group_selector); + + regmap_update_bits(priv->sysctl, priv->power_offset, bit, + argument ? 
bit : 0); + } else { + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_CMD_PINMUX +static int k210_pc_get_pin_muxing(struct udevice *dev, unsigned int selector, + char *buf, int size) +{ + struct k210_pc_priv *priv = dev_get_priv(dev); + u32 val = readl(&priv->fpioa->pins[selector]); + const struct k210_pcf_info *info = &k210_pcf_infos[val & K210_PCF_MASK]; + + strncpy(buf, info->name, min((size_t)size, sizeof(info->name))); + return 0; +} +#endif + +static const struct pinconf_param k210_pc_pinconf_params[] = { + { "bias-disable", PIN_CONFIG_BIAS_DISABLE, 0 }, + { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, + { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, + { "drive-strength", PIN_CONFIG_DRIVE_STRENGTH, U32_MAX }, + { "drive-strength-ua", PIN_CONFIG_DRIVE_STRENGTH_UA, U32_MAX }, + { "input-enable", PIN_CONFIG_INPUT_ENABLE, 1 }, + { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 }, + { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, + { "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 }, + { "power-source", PIN_CONFIG_POWER_SOURCE, K210_PC_POWER_1V8 }, + { "output-low", PIN_CONFIG_OUTPUT, 0 }, + { "output-high", PIN_CONFIG_OUTPUT, 1 }, + { "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 }, + { "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 }, + { "slew-rate", PIN_CONFIG_SLEW_RATE, 1 }, + { "output-polarity-invert", PIN_CONFIG_OUTPUT_INVERT, 1}, + { "input-polarity-invert", PIN_CONFIG_INPUT_INVERT, 1}, +}; + +static const struct pinctrl_ops k210_pc_pinctrl_ops = { +#ifdef CONFIG_CMD_PINMUX + .get_pins_count = k210_pc_get_pins_count, + .get_pin_name = k210_pc_get_pin_name, +#endif + .get_groups_count = k210_pc_get_groups_count, + .get_group_name = k210_pc_get_group_name, + .pinmux_property_set = k210_pc_pinmux_set, + .pinconf_num_params = ARRAY_SIZE(k210_pc_pinconf_params), + .pinconf_params = k210_pc_pinconf_params, + .pinconf_set = k210_pc_pinconf_set, + .pinconf_group_set = k210_pc_pinconf_group_set, + .set_state = pinctrl_generic_set_state, +#ifdef CONFIG_CMD_PINMUX + .get_pin_muxing = k210_pc_get_pin_muxing, +#endif +}; + +static int k210_pc_probe(struct udevice *dev) +{ + int ret, i, j; + struct k210_pc_priv *priv = dev_get_priv(dev); + + priv->fpioa = dev_read_addr_ptr(dev); + if (!priv->fpioa) + return -EINVAL; + + ret = clk_get_by_index(dev, 0, &priv->clk); + if (ret) + return ret; + + ret = clk_enable(&priv->clk); + if (ret && ret != -ENOSYS && ret != -ENOTSUPP) + goto err; + + priv->sysctl = syscon_regmap_lookup_by_phandle(dev, "kendryte,sysctl"); + if (IS_ERR(priv->sysctl)) { + ret = -ENODEV; + goto err; + } + + ret = dev_read_u32(dev, "kendryte,power-offset", &priv->power_offset); + if (ret) + goto err; + + debug("%s: fpioa = %p sysctl = %p power offset = %x\n", __func__, + priv->fpioa, (void *)priv->sysctl->ranges[0].start, + priv->power_offset); + + /* Init input ties */ + for (i = 0; i < ARRAY_SIZE(priv->fpioa->tie_en); i++) { + u32 val = 0; + + for (j = 0; j < 32; j++) + if (k210_pcf_infos[i * 32 + j].mode_id == + K210_PC_DEFAULT_IN_TIE) + val |= BIT(j); + writel(val, &priv->fpioa->tie_en[i]); + writel(val, &priv->fpioa->tie_val[i]); + } + + return 0; + +err: + clk_free(&priv->clk); + return ret; +} + +static const struct udevice_id k210_pc_ids[] = { + { .compatible = "kendryte,k210-fpioa" }, + { } +}; + +U_BOOT_DRIVER(pinctrl_k210) = { + .name = "pinctrl_k210", + .id = UCLASS_PINCTRL, + .of_match = k210_pc_ids, + .probe = k210_pc_probe, + .priv_auto_alloc_size = sizeof(struct k210_pc_priv), + .ops = &k210_pc_pinctrl_ops, +}; diff --git 
a/drivers/pinctrl/pinctrl-sandbox.c b/drivers/pinctrl/pinctrl-sandbox.c index ac0119d1988..d27f74248d0 100644 --- a/drivers/pinctrl/pinctrl-sandbox.c +++ b/drivers/pinctrl/pinctrl-sandbox.c @@ -1,57 +1,70 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com> + * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com> */ -/* #define DEBUG */ - #include <common.h> #include <dm.h> -#include <log.h> #include <dm/pinctrl.h> +#include <dt-bindings/pinctrl/sandbox-pinmux.h> +#include <log.h> #include <linux/bitops.h> +/* + * This driver emulates a pin controller with the following rules: + * - The pinctrl config for each pin must be set individually + * - The first three pins (P0-P2) must be muxed as a group + * - The next two pins (P3-P4) must be muxed as a group + * - The last four pins (P5-P8) must be muxed individually + */ + static const char * const sandbox_pins[] = { - "SCL", - "SDA", - "TX", - "RX", - "W1", - "GPIO0", - "GPIO1", - "GPIO2", - "GPIO3", +#define PIN(x) \ + [x] = "P" #x + PIN(0), + PIN(1), + PIN(2), + PIN(3), + PIN(4), + PIN(5), + PIN(6), + PIN(7), + PIN(8), +#undef PIN }; -static const char * const sandbox_pins_muxing[] = { - "I2C SCL", - "I2C SDA", - "Uart TX", - "Uart RX", - "1-wire gpio", - "gpio", - "gpio", - "gpio", - "gpio", +static const char * const sandbox_pins_muxing[][2] = { + { "UART TX", "I2C SCL" }, + { "UART RX", "I2C SDA" }, + { "SPI SCLK", "I2S SCK" }, + { "SPI MOSI", "I2S SD" }, + { "SPI MISO", "I2S WS" }, + { "GPIO0", "SPI CS0" }, + { "GPIO1", "SPI CS1" }, + { "GPIO2", "PWM0" }, + { "GPIO3", "PWM1" }, }; +#define SANDBOX_GROUP_I2C_UART 0 +#define SANDBOX_GROUP_SPI_I2S 1 + static const char * const sandbox_groups[] = { - "i2c", - "serial_a", - "serial_b", - "spi", - "w1", + [SANDBOX_GROUP_I2C_UART] = "I2C_UART", + [SANDBOX_GROUP_SPI_I2S] = "SPI_I2S", }; static const char * const sandbox_functions[] = { - "i2c", - "serial", - "spi", - "w1", - "gpio", - "gpio", - "gpio", - "gpio", +#define FUNC(id) \ + [SANDBOX_PINMUX_##id] = #id + FUNC(UART), + FUNC(I2C), + FUNC(SPI), + FUNC(I2S), + FUNC(GPIO), + FUNC(CS), + FUNC(PWM), +#undef FUNC }; static const struct pinconf_param sandbox_conf_params[] = { @@ -68,9 +81,12 @@ static const struct pinconf_param sandbox_conf_params[] = { { "input-disable", PIN_CONFIG_INPUT_ENABLE, 0 }, }; -/* bitfield used to save param and value of each pin/selector */ -static unsigned int sandbox_pins_param[ARRAY_SIZE(sandbox_pins)]; -static unsigned int sandbox_pins_value[ARRAY_SIZE(sandbox_pins)]; +/* Bitfield used to save param and value of each pin/selector */ +struct sandbox_pinctrl_priv { + unsigned int mux; + unsigned int pins_param[ARRAY_SIZE(sandbox_pins)]; + unsigned int pins_value[ARRAY_SIZE(sandbox_pins)]; +}; static int sandbox_get_pins_count(struct udevice *dev) { @@ -87,16 +103,18 @@ static int sandbox_get_pin_muxing(struct udevice *dev, char *buf, int size) { const struct pinconf_param *p; + struct sandbox_pinctrl_priv *priv = dev_get_priv(dev); int i; - snprintf(buf, size, "%s", sandbox_pins_muxing[selector]); + snprintf(buf, size, "%s", + sandbox_pins_muxing[selector][!!(priv->mux & BIT(selector))]); - if (sandbox_pins_param[selector]) { + if (priv->pins_param[selector]) { for (i = 0, p = sandbox_conf_params; i < ARRAY_SIZE(sandbox_conf_params); i++, p++) { - if ((sandbox_pins_param[selector] & BIT(p->param)) && - (!!(sandbox_pins_value[selector] & BIT(p->param)) == + if 
((priv->pins_param[selector] & BIT(p->param)) && + (!!(priv->pins_value[selector] & BIT(p->param)) == p->default_value)) { strncat(buf, " ", size); strncat(buf, p->property, size); @@ -133,12 +151,32 @@ static const char *sandbox_get_function_name(struct udevice *dev, static int sandbox_pinmux_set(struct udevice *dev, unsigned pin_selector, unsigned func_selector) { + int mux; + struct sandbox_pinctrl_priv *priv = dev_get_priv(dev); + debug("sandbox pinmux: pin = %d (%s), function = %d (%s)\n", pin_selector, sandbox_get_pin_name(dev, pin_selector), func_selector, sandbox_get_function_name(dev, func_selector)); - sandbox_pins_param[pin_selector] = 0; - sandbox_pins_value[pin_selector] = 0; + if (pin_selector < 5) + return -EINVAL; + + switch (func_selector) { + case SANDBOX_PINMUX_GPIO: + mux = 0; + break; + case SANDBOX_PINMUX_CS: + case SANDBOX_PINMUX_PWM: + mux = BIT(pin_selector); + break; + default: + return -EINVAL; + } + + priv->mux &= ~BIT(pin_selector); + priv->mux |= mux; + priv->pins_param[pin_selector] = 0; + priv->pins_value[pin_selector] = 0; return 0; } @@ -147,25 +185,75 @@ static int sandbox_pinmux_group_set(struct udevice *dev, unsigned group_selector, unsigned func_selector) { + bool mux; + int i, group_start, group_end; + struct sandbox_pinctrl_priv *priv = dev_get_priv(dev); + unsigned int mask; + debug("sandbox pinmux: group = %d (%s), function = %d (%s)\n", group_selector, sandbox_get_group_name(dev, group_selector), func_selector, sandbox_get_function_name(dev, func_selector)); + if (group_selector == SANDBOX_GROUP_I2C_UART) { + group_start = 0; + group_end = 1; + + if (func_selector == SANDBOX_PINMUX_UART) + mux = false; + else if (func_selector == SANDBOX_PINMUX_I2C) + mux = true; + else + return -EINVAL; + } else if (group_selector == SANDBOX_GROUP_SPI_I2S) { + group_start = 2; + group_end = 4; + + if (func_selector == SANDBOX_PINMUX_SPI) + mux = false; + else if (func_selector == SANDBOX_PINMUX_I2S) + mux = true; + else + return -EINVAL; + } else { + return -EINVAL; + } + + mask = GENMASK(group_end, group_start); + priv->mux &= ~mask; + priv->mux |= mux ? mask : 0; + + for (i = group_start; i < group_end; i++) { + priv->pins_param[i] = 0; + priv->pins_value[i] = 0; + } + return 0; } +static int sandbox_pinmux_property_set(struct udevice *dev, u32 pinmux_group) +{ + int ret; + unsigned pin_selector = pinmux_group & 0xFFFF; + unsigned func_selector = pinmux_group >> 16; + + ret = sandbox_pinmux_set(dev, pin_selector, func_selector); + return ret ? 
ret : pin_selector; +} + static int sandbox_pinconf_set(struct udevice *dev, unsigned pin_selector, unsigned param, unsigned argument) { + struct sandbox_pinctrl_priv *priv = dev_get_priv(dev); + debug("sandbox pinconf: pin = %d (%s), param = %d, arg = %d\n", pin_selector, sandbox_get_pin_name(dev, pin_selector), param, argument); - sandbox_pins_param[pin_selector] |= BIT(param); + priv->pins_param[pin_selector] |= BIT(param); if (argument) - sandbox_pins_value[pin_selector] |= BIT(param); + priv->pins_value[pin_selector] |= BIT(param); else - sandbox_pins_value[pin_selector] &= ~BIT(param); + priv->pins_value[pin_selector] &= ~BIT(param); return 0; } @@ -191,6 +279,7 @@ const struct pinctrl_ops sandbox_pinctrl_ops = { .get_function_name = sandbox_get_function_name, .pinmux_set = sandbox_pinmux_set, .pinmux_group_set = sandbox_pinmux_group_set, + .pinmux_property_set = sandbox_pinmux_property_set, .pinconf_num_params = ARRAY_SIZE(sandbox_conf_params), .pinconf_params = sandbox_conf_params, .pinconf_set = sandbox_pinconf_set, @@ -207,5 +296,6 @@ U_BOOT_DRIVER(sandbox_pinctrl) = { .name = "sandbox_pinctrl", .id = UCLASS_PINCTRL, .of_match = sandbox_pinctrl_match, + .priv_auto_alloc_size = sizeof(struct sandbox_pinctrl_priv), .ops = &sandbox_pinctrl_ops, }; diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c index de22e49ebef..b13fc0ba632 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77990.c +++ b/drivers/pinctrl/renesas/pfc-r8a77990.c @@ -217,8 +217,8 @@ #define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -433,6 +433,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), 
FM(SEL_SPEED_PULSE_IF_2), F_(0, 0)) /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ +#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) +#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1) #define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1) #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1) #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1) @@ -453,7 +455,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define PINMUX_MOD_SELS \ \ -MOD_SEL0_30_29 \ + MOD_SEL1_31 \ +MOD_SEL0_30_29 MOD_SEL1_30 \ MOD_SEL1_29 \ MOD_SEL0_28 MOD_SEL1_28 \ MOD_SEL0_27_26 \ @@ -619,7 +622,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N), PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0), - PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A), + PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH), PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N), PINMUX_IPSR_GPSR(IP2_23_20, TX5_B), PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2), @@ -627,7 +630,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0), PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0), - PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A), + PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE), PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N), PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1), PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0), @@ -1043,7 +1046,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1), PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1), PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0), - PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B), + PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1), PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0), PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP), @@ -1052,7 +1055,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1), PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1), PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0), - PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B), + PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1), PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0), /* IPSR11 */ @@ -1070,13 +1073,13 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0), PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0), - PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A), + PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0), PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC), PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1), PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0), PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A), - PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A), + PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0), PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0), PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1), @@ -1181,7 +1184,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0), PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1), PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1), - PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A), + PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0), PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT), PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1), @@ -1249,7 +1252,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2), PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3), PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1), - PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B), + PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1), PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6), PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, 
SEL_HSCIF2_0), @@ -1534,22 +1537,22 @@ static const unsigned int avb_avtp_pps_mux[] = { AVB_AVTP_PPS_MARK, }; -static const unsigned int avb_avtp_match_a_pins[] = { - /* AVB_AVTP_MATCH_A */ +static const unsigned int avb_avtp_match_pins[] = { + /* AVB_AVTP_MATCH */ RCAR_GP_PIN(2, 24), }; -static const unsigned int avb_avtp_match_a_mux[] = { - AVB_AVTP_MATCH_A_MARK, +static const unsigned int avb_avtp_match_mux[] = { + AVB_AVTP_MATCH_MARK, }; -static const unsigned int avb_avtp_capture_a_pins[] = { - /* AVB_AVTP_CAPTURE_A */ +static const unsigned int avb_avtp_capture_pins[] = { + /* AVB_AVTP_CAPTURE */ RCAR_GP_PIN(2, 25), }; -static const unsigned int avb_avtp_capture_a_mux[] = { - AVB_AVTP_CAPTURE_A_MARK, +static const unsigned int avb_avtp_capture_mux[] = { + AVB_AVTP_CAPTURE_MARK, }; /* - CAN ------------------------------------------------------------------ */ @@ -3794,8 +3797,8 @@ static const struct { SH_PFC_PIN_GROUP(avb_phy_int), SH_PFC_PIN_GROUP(avb_mii), SH_PFC_PIN_GROUP(avb_avtp_pps), - SH_PFC_PIN_GROUP(avb_avtp_match_a), - SH_PFC_PIN_GROUP(avb_avtp_capture_a), + SH_PFC_PIN_GROUP(avb_avtp_match), + SH_PFC_PIN_GROUP(avb_avtp_capture), SH_PFC_PIN_GROUP(can0_data), SH_PFC_PIN_GROUP(can1_data), SH_PFC_PIN_GROUP(can_clk), @@ -4071,8 +4074,8 @@ static const char * const avb_groups[] = { "avb_phy_int", "avb_mii", "avb_avtp_pps", - "avb_avtp_match_a", - "avb_avtp_capture_a", + "avb_avtp_match", + "avb_avtp_capture", }; static const char * const can0_groups[] = { @@ -4967,11 +4970,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_1_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, - GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, - 2, 2, 2, 1, 1, 2, 1, 4), + GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, + 1, 2, 2, 2, 1, 1, 2, 1, 4), GROUP( - /* RESERVED 31, 30 */ - 0, 0, 0, 0, + MOD_SEL1_31 + MOD_SEL1_30 MOD_SEL1_29 MOD_SEL1_28 /* RESERVED 27 */ diff --git a/drivers/power/regulator/gpio-regulator.c b/drivers/power/regulator/gpio-regulator.c index 947f812d099..28c9e222e2b 100644 --- a/drivers/power/regulator/gpio-regulator.c +++ b/drivers/power/regulator/gpio-regulator.c @@ -18,8 +18,6 @@ #define GPIO_REGULATOR_MAX_STATES 2 -DECLARE_GLOBAL_DATA_PTR; - struct gpio_regulator_platdata { struct regulator_common_platdata common; struct gpio_desc gpio; /* GPIO for regulator voltage control */ @@ -32,10 +30,8 @@ static int gpio_regulator_ofdata_to_platdata(struct udevice *dev) struct dm_regulator_uclass_platdata *uc_pdata; struct gpio_regulator_platdata *dev_pdata; struct gpio_desc *gpio; - const void *blob = gd->fdt_blob; - int node = dev_of_offset(dev); int ret, count, i, j; - u32 states_array[8]; + u32 states_array[GPIO_REGULATOR_MAX_STATES * 2]; dev_pdata = dev_get_platdata(dev); uc_pdata = dev_get_uclass_platdata(dev); @@ -57,11 +53,20 @@ static int gpio_regulator_ofdata_to_platdata(struct udevice *dev) if (ret) debug("regulator gpio - not found! 
Error: %d", ret); - count = fdtdec_get_int_array_count(blob, node, "states", - states_array, 8); + ret = dev_read_size(dev, "states"); + if (ret < 0) + return ret; - if (!count) - return -EINVAL; + count = ret / sizeof(states_array[0]); + if (count > ARRAY_SIZE(states_array)) { + debug("regulator gpio - to many states (%d > %d)", + count / 2, GPIO_REGULATOR_MAX_STATES); + count = ARRAY_SIZE(states_array); + } + + ret = dev_read_u32_array(dev, "states", states_array, count); + if (ret < 0) + return ret; for (i = 0, j = 0; i < count; i += 2) { dev_pdata->voltages[j] = states_array[i]; diff --git a/drivers/ram/Kconfig b/drivers/ram/Kconfig index a0e859afd6e..a270e13b265 100644 --- a/drivers/ram/Kconfig +++ b/drivers/ram/Kconfig @@ -73,6 +73,7 @@ config IMXRT_SDRAM to support external memories like sdram, psram & nand. This driver is for the sdram memory interface with the SEMC. +source "drivers/ram/aspeed/Kconfig" source "drivers/ram/rockchip/Kconfig" source "drivers/ram/sifive/Kconfig" source "drivers/ram/stm32mp1/Kconfig" diff --git a/drivers/ram/Makefile b/drivers/ram/Makefile index d685a579a0f..209a78c06f5 100644 --- a/drivers/ram/Makefile +++ b/drivers/ram/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_K3_AM654_DDRSS) += k3-am654-ddrss.o obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ +obj-$(CONFIG_ARCH_ASPEED) += aspeed/ obj-$(CONFIG_K3_J721E_DDRSS) += k3-j721e/ obj-$(CONFIG_IMXRT_SDRAM) += imxrt_sdram.o diff --git a/drivers/ram/aspeed/Kconfig b/drivers/ram/aspeed/Kconfig new file mode 100644 index 00000000000..020c9131882 --- /dev/null +++ b/drivers/ram/aspeed/Kconfig @@ -0,0 +1,10 @@ +if RAM || SPL_RAM +config ASPEED_DDR4_DUALX8 + bool "Enable Dual X8 DDR4 die" + depends on DM && OF_CONTROL && ARCH_ASPEED + default n + help + Say Y if dual X8 DDR4 die is used on the board. The aspeed ddr sdram + controller needs to know if the memory chip mounted on the board is dual + x8 die or not. Or it may get the wrong size of the memory space. +endif diff --git a/drivers/ram/aspeed/Makefile b/drivers/ram/aspeed/Makefile new file mode 100644 index 00000000000..af604f8a4b0 --- /dev/null +++ b/drivers/ram/aspeed/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +obj-$(CONFIG_ASPEED_AST2500) += sdram_ast2500.o
\ No newline at end of file diff --git a/drivers/ram/aspeed/sdram_ast2500.c b/drivers/ram/aspeed/sdram_ast2500.c new file mode 100644 index 00000000000..9f4304cb663 --- /dev/null +++ b/drivers/ram/aspeed/sdram_ast2500.c @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2020 ASPEED Technology Inc. + * + * Copyright 2016 Google, Inc + */ + +#include <common.h> +#include <clk.h> +#include <dm.h> +#include <errno.h> +#include <log.h> +#include <ram.h> +#include <regmap.h> +#include <reset.h> +#include <asm/io.h> +#include <asm/arch/scu_ast2500.h> +#include <asm/arch/sdram_ast2500.h> +#include <asm/arch/wdt.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <dt-bindings/clock/aspeed-clock.h> + +/* These configuration parameters are taken from Aspeed SDK */ +#define DDR4_MR46_MODE 0x08000000 +#define DDR4_MR5_MODE 0x400 +#define DDR4_MR13_MODE 0x101 +#define DDR4_MR02_MODE 0x410 +#define DDR4_TRFC 0x45457188 + +#define PHY_CFG_SIZE 15 + +static const u32 ddr4_ac_timing[3] = {0x63604e37, 0xe97afa99, 0x00019000}; +static const struct { + u32 index[PHY_CFG_SIZE]; + u32 value[PHY_CFG_SIZE]; +} ddr4_phy_config = { + .index = {0, 1, 3, 4, 5, 56, 57, 58, 59, 60, 61, 62, 36, 49, 50}, + .value = { + 0x42492aae, 0x09002000, 0x55e00b0b, 0x20000000, 0x24, + 0x03002900, 0x0e0000a0, 0x000e001c, 0x35b8c106, 0x08080607, + 0x9b000900, 0x0e400a00, 0x00100008, 0x3c183c3c, 0x00631e0e, + }, +}; + +#define SDRAM_MAX_SIZE (1024 * 1024 * 1024) +#define SDRAM_MIN_SIZE (128 * 1024 * 1024) + +DECLARE_GLOBAL_DATA_PTR; + +/* + * Bandwidth configuration parameters for different SDRAM requests. + * These are hardcoded settings taken from Aspeed SDK. + */ +static const u32 ddr_max_grant_params[4] = { + 0x88448844, 0x24422288, 0x22222222, 0x22222222 +}; + +/* + * These registers are not documented by Aspeed at all. + * All writes and reads are taken pretty much as is from SDK. 
+ */ +struct ast2500_ddr_phy { + u32 phy[117]; +}; + +struct dram_info { + struct ram_info info; + struct clk ddr_clk; + struct ast2500_sdrammc_regs *regs; + struct ast2500_scu *scu; + struct ast2500_ddr_phy *phy; + ulong clock_rate; +}; + +static int ast2500_sdrammc_init_phy(struct ast2500_ddr_phy *phy) +{ + writel(0, &phy->phy[2]); + writel(0, &phy->phy[6]); + writel(0, &phy->phy[8]); + writel(0, &phy->phy[10]); + writel(0, &phy->phy[12]); + writel(0, &phy->phy[42]); + writel(0, &phy->phy[44]); + + writel(0x86000000, &phy->phy[16]); + writel(0x00008600, &phy->phy[17]); + writel(0x80000000, &phy->phy[18]); + writel(0x80808080, &phy->phy[19]); + + return 0; +} + +static void ast2500_ddr_phy_init_process(struct dram_info *info) +{ + struct ast2500_sdrammc_regs *regs = info->regs; + + writel(0, ®s->phy_ctrl[0]); + writel(0x4040, &info->phy->phy[51]); + + writel(SDRAM_PHYCTRL0_NRST | SDRAM_PHYCTRL0_INIT, ®s->phy_ctrl[0]); + while ((readl(®s->phy_ctrl[0]) & SDRAM_PHYCTRL0_INIT)) + ; + writel(SDRAM_PHYCTRL0_NRST | SDRAM_PHYCTRL0_AUTO_UPDATE, + ®s->phy_ctrl[0]); +} + +static void ast2500_sdrammc_set_vref(struct dram_info *info, u32 vref) +{ + writel(0, &info->regs->phy_ctrl[0]); + writel((vref << 8) | 0x6, &info->phy->phy[48]); + ast2500_ddr_phy_init_process(info); +} + +static int ast2500_ddr_cbr_test(struct dram_info *info) +{ + struct ast2500_sdrammc_regs *regs = info->regs; + int i; + const u32 test_params = SDRAM_TEST_EN + | SDRAM_TEST_ERRSTOP + | SDRAM_TEST_TWO_MODES; + int ret = 0; + + writel((1 << SDRAM_REFRESH_CYCLES_SHIFT) | + (0x5c << SDRAM_REFRESH_PERIOD_SHIFT), ®s->refresh_timing); + writel((0xfff << SDRAM_TEST_LEN_SHIFT), ®s->test_addr); + writel(0xff00ff00, ®s->test_init_val); + writel(SDRAM_TEST_EN | (SDRAM_TEST_MODE_RW << SDRAM_TEST_MODE_SHIFT) | + SDRAM_TEST_ERRSTOP, ®s->ecc_test_ctrl); + + while (!(readl(®s->ecc_test_ctrl) & SDRAM_TEST_DONE)) + ; + + if (readl(®s->ecc_test_ctrl) & SDRAM_TEST_FAIL) { + ret = -EIO; + } else { + for (i = 0; i <= SDRAM_TEST_GEN_MODE_MASK; ++i) { + writel((i << SDRAM_TEST_GEN_MODE_SHIFT) | test_params, + ®s->ecc_test_ctrl); + while (!(readl(®s->ecc_test_ctrl) & SDRAM_TEST_DONE)) + ; + if (readl(®s->ecc_test_ctrl) & SDRAM_TEST_FAIL) { + ret = -EIO; + break; + } + } + } + + writel(0, ®s->refresh_timing); + writel(0, ®s->ecc_test_ctrl); + + return ret; +} + +static int ast2500_sdrammc_ddr4_calibrate_vref(struct dram_info *info) +{ + int i; + int vref_min = 0xff; + int vref_max = 0; + int range_size = 0; + + for (i = 1; i < 0x40; ++i) { + int res; + + ast2500_sdrammc_set_vref(info, i); + res = ast2500_ddr_cbr_test(info); + if (res < 0) { + if (range_size > 0) + break; + } else { + ++range_size; + vref_min = min(vref_min, i); + vref_max = max(vref_max, i); + } + } + + /* Pick average setting */ + ast2500_sdrammc_set_vref(info, (vref_min + vref_max + 1) / 2); + + return 0; +} + +static size_t ast2500_sdrammc_get_vga_mem_size(struct dram_info *info) +{ + size_t vga_mem_size_base = 8 * 1024 * 1024; + u32 vga_hwconf = (readl(&info->scu->hwstrap) & SCU_HWSTRAP_VGAMEM_MASK) + >> SCU_HWSTRAP_VGAMEM_SHIFT; + + return vga_mem_size_base << vga_hwconf; +} + +/* + * Find out RAM size and save it in dram_info + * + * The procedure is taken from Aspeed SDK + */ +static void ast2500_sdrammc_calc_size(struct dram_info *info) +{ + /* The controller supports 128/256/512/1024 MB ram */ + size_t ram_size = SDRAM_MIN_SIZE; + const int write_test_offset = 0x100000; + u32 test_pattern = 0xdeadbeef; + u32 cap_param = SDRAM_CONF_CAP_1024M; + u32 refresh_timing_param = 
DDR4_TRFC; + const u32 write_addr_base = CONFIG_SYS_SDRAM_BASE + write_test_offset; + + for (ram_size = SDRAM_MAX_SIZE; ram_size > SDRAM_MIN_SIZE; + ram_size >>= 1) { + writel(test_pattern, write_addr_base + (ram_size >> 1)); + test_pattern = (test_pattern >> 4) | (test_pattern << 28); + } + + /* One last write to overwrite all wrapped values */ + writel(test_pattern, write_addr_base); + + /* Reset the pattern and see which value was really written */ + test_pattern = 0xdeadbeef; + for (ram_size = SDRAM_MAX_SIZE; ram_size > SDRAM_MIN_SIZE; + ram_size >>= 1) { + if (readl(write_addr_base + (ram_size >> 1)) == test_pattern) + break; + + --cap_param; + refresh_timing_param >>= 8; + test_pattern = (test_pattern >> 4) | (test_pattern << 28); + } + + clrsetbits_le32(&info->regs->ac_timing[1], + (SDRAM_AC_TRFC_MASK << SDRAM_AC_TRFC_SHIFT), + ((refresh_timing_param & SDRAM_AC_TRFC_MASK) + << SDRAM_AC_TRFC_SHIFT)); + + info->info.base = CONFIG_SYS_SDRAM_BASE; + info->info.size = ram_size - ast2500_sdrammc_get_vga_mem_size(info); + clrsetbits_le32(&info->regs->config, + (SDRAM_CONF_CAP_MASK << SDRAM_CONF_CAP_SHIFT), + ((cap_param & SDRAM_CONF_CAP_MASK) + << SDRAM_CONF_CAP_SHIFT)); +} + +static int ast2500_sdrammc_init_ddr4(struct dram_info *info) +{ + int i; + const u32 power_control = SDRAM_PCR_CKE_EN + | (1 << SDRAM_PCR_CKE_DELAY_SHIFT) + | (2 << SDRAM_PCR_TCKE_PW_SHIFT) + | SDRAM_PCR_RESETN_DIS + | SDRAM_PCR_RGAP_CTRL_EN | SDRAM_PCR_ODT_EN | SDRAM_PCR_ODT_EXT_EN; + const u32 conf = (SDRAM_CONF_CAP_1024M << SDRAM_CONF_CAP_SHIFT) +#ifdef CONFIG_ASPEED_DDR4_DUALX8 + | SDRAM_CONF_DUALX8 +#endif + | SDRAM_CONF_SCRAMBLE | SDRAM_CONF_SCRAMBLE_PAT2 | SDRAM_CONF_DDR4; + int ret; + + writel(conf, &info->regs->config); + for (i = 0; i < ARRAY_SIZE(ddr4_ac_timing); ++i) + writel(ddr4_ac_timing[i], &info->regs->ac_timing[i]); + + writel(DDR4_MR46_MODE, &info->regs->mr46_mode_setting); + writel(DDR4_MR5_MODE, &info->regs->mr5_mode_setting); + writel(DDR4_MR02_MODE, &info->regs->mr02_mode_setting); + writel(DDR4_MR13_MODE, &info->regs->mr13_mode_setting); + + for (i = 0; i < PHY_CFG_SIZE; ++i) { + writel(ddr4_phy_config.value[i], + &info->phy->phy[ddr4_phy_config.index[i]]); + } + + writel(power_control, &info->regs->power_control); + + ast2500_ddr_phy_init_process(info); + + ret = ast2500_sdrammc_ddr4_calibrate_vref(info); + if (ret < 0) { + debug("Vref calibration failed!\n"); + return ret; + } + + writel((1 << SDRAM_REFRESH_CYCLES_SHIFT) + | SDRAM_REFRESH_ZQCS_EN | (0x2f << SDRAM_REFRESH_PERIOD_SHIFT), + &info->regs->refresh_timing); + + setbits_le32(&info->regs->power_control, + SDRAM_PCR_AUTOPWRDN_EN | SDRAM_PCR_ODT_AUTO_ON); + + ast2500_sdrammc_calc_size(info); + + setbits_le32(&info->regs->config, SDRAM_CONF_CACHE_INIT_EN); + while (!(readl(&info->regs->config) & SDRAM_CONF_CACHE_INIT_DONE)) + ; + setbits_le32(&info->regs->config, SDRAM_CONF_CACHE_EN); + + writel(SDRAM_MISC_DDR4_TREFRESH, &info->regs->misc_control); + + /* Enable all requests except video & display */ + writel(SDRAM_REQ_USB20_EHCI1 + | SDRAM_REQ_USB20_EHCI2 + | SDRAM_REQ_CPU + | SDRAM_REQ_AHB2 + | SDRAM_REQ_AHB + | SDRAM_REQ_MAC0 + | SDRAM_REQ_MAC1 + | SDRAM_REQ_PCIE + | SDRAM_REQ_XDMA + | SDRAM_REQ_ENCRYPTION + | SDRAM_REQ_VIDEO_FLAG + | SDRAM_REQ_VIDEO_LOW_PRI_WRITE + | SDRAM_REQ_2D_RW + | SDRAM_REQ_MEMCHECK, &info->regs->req_limit_mask); + + return 0; +} + +static void ast2500_sdrammc_unlock(struct dram_info *info) +{ + writel(SDRAM_UNLOCK_KEY, &info->regs->protection_key); + while (!readl(&info->regs->protection_key)) + ; +} + 
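The ast2500_sdrammc_calc_size() routine above sizes the array by exploiting address wrap-around: a write made at a power-of-two offset beyond the real capacity aliases onto a lower address, so after planting a rotating pattern at every candidate half-size, the largest offset that still reads back its own pattern marks the true size. A self-contained sketch of the same technique, similar in spirit to U-Boot's generic get_ram_size() helper, with hypothetical name and parameters:

#include <common.h>
#include <asm/io.h>	/* readl()/writel() */

static ulong probe_ram_size(ulong base, ulong minsize, ulong maxsize)
{
	ulong size;
	u32 pattern = 0xdeadbeef;

	/* Plant a rotating pattern at each power-of-two half-size */
	for (size = maxsize; size > minsize; size >>= 1) {
		writel(pattern, base + (size >> 1));
		pattern = (pattern >> 4) | (pattern << 28);
	}
	/* A final write at the base clobbers every wrapped alias */
	writel(pattern, base);

	/* Replay the sequence; the first surviving pattern is the size */
	pattern = 0xdeadbeef;
	for (size = maxsize; size > minsize; size >>= 1) {
		if (readl(base + (size >> 1)) == pattern)
			return size;
		pattern = (pattern >> 4) | (pattern << 28);
	}

	return minsize;
}

The driver then subtracts the VGA carve-out reported by ast2500_sdrammc_get_vga_mem_size() before publishing the usable size in ram_info.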
+static void ast2500_sdrammc_lock(struct dram_info *info) +{ + writel(~SDRAM_UNLOCK_KEY, &info->regs->protection_key); + while (readl(&info->regs->protection_key)) + ; +} + +static int ast2500_sdrammc_probe(struct udevice *dev) +{ + struct reset_ctl reset_ctl; + struct dram_info *priv = (struct dram_info *)dev_get_priv(dev); + struct ast2500_sdrammc_regs *regs = priv->regs; + int i; + int ret = clk_get_by_index(dev, 0, &priv->ddr_clk); + + if (ret) { + debug("DDR:No CLK\n"); + return ret; + } + + priv->scu = ast_get_scu(); + if (IS_ERR(priv->scu)) { + debug("%s(): can't get SCU\n", __func__); + return PTR_ERR(priv->scu); + } + + clk_set_rate(&priv->ddr_clk, priv->clock_rate); + ret = reset_get_by_index(dev, 0, &reset_ctl); + if (ret) { + debug("%s(): Failed to get reset signal\n", __func__); + return ret; + } + + ret = reset_assert(&reset_ctl); + if (ret) { + debug("%s(): SDRAM reset failed: %u\n", __func__, ret); + return ret; + } + + ast2500_sdrammc_unlock(priv); + + writel(SDRAM_PCR_MREQI_DIS | SDRAM_PCR_RESETN_DIS, + ®s->power_control); + writel(SDRAM_VIDEO_UNLOCK_KEY, ®s->gm_protection_key); + + /* Mask all requests except CPU and AHB during PHY init */ + writel(~(SDRAM_REQ_CPU | SDRAM_REQ_AHB), ®s->req_limit_mask); + + for (i = 0; i < ARRAY_SIZE(ddr_max_grant_params); ++i) + writel(ddr_max_grant_params[i], ®s->max_grant_len[i]); + + setbits_le32(®s->intr_ctrl, SDRAM_ICR_RESET_ALL); + + ast2500_sdrammc_init_phy(priv->phy); + if (readl(&priv->scu->hwstrap) & SCU_HWSTRAP_DDR4) { + ast2500_sdrammc_init_ddr4(priv); + } else { + debug("Unsupported DRAM3\n"); + return -EINVAL; + } + + clrbits_le32(®s->intr_ctrl, SDRAM_ICR_RESET_ALL); + ast2500_sdrammc_lock(priv); + + return 0; +} + +static int ast2500_sdrammc_ofdata_to_platdata(struct udevice *dev) +{ + struct dram_info *priv = dev_get_priv(dev); + struct regmap *map; + int ret; + + ret = regmap_init_mem(dev_ofnode(dev), &map); + if (ret) + return ret; + + priv->regs = regmap_get_range(map, 0); + priv->phy = regmap_get_range(map, 1); + + priv->clock_rate = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), + "clock-frequency", 0); + + if (!priv->clock_rate) { + debug("DDR Clock Rate not defined\n"); + return -EINVAL; + } + + return 0; +} + +static int ast2500_sdrammc_get_info(struct udevice *dev, struct ram_info *info) +{ + struct dram_info *priv = dev_get_priv(dev); + + *info = priv->info; + + return 0; +} + +static struct ram_ops ast2500_sdrammc_ops = { + .get_info = ast2500_sdrammc_get_info, +}; + +static const struct udevice_id ast2500_sdrammc_ids[] = { + { .compatible = "aspeed,ast2500-sdrammc" }, + { } +}; + +U_BOOT_DRIVER(sdrammc_ast2500) = { + .name = "aspeed_ast2500_sdrammc", + .id = UCLASS_RAM, + .of_match = ast2500_sdrammc_ids, + .ops = &ast2500_sdrammc_ops, + .ofdata_to_platdata = ast2500_sdrammc_ofdata_to_platdata, + .probe = ast2500_sdrammc_probe, + .priv_auto_alloc_size = sizeof(struct dram_info), +}; diff --git a/drivers/ram/imxrt_sdram.c b/drivers/ram/imxrt_sdram.c index 765a2141d13..b6ee02d2279 100644 --- a/drivers/ram/imxrt_sdram.c +++ b/drivers/ram/imxrt_sdram.c @@ -7,6 +7,7 @@ #include <common.h> #include <clk.h> #include <dm.h> +#include <dm/device_compat.h> #include <init.h> #include <log.h> #include <ram.h> diff --git a/drivers/ram/stm32mp1/stm32mp1_interactive.c b/drivers/ram/stm32mp1/stm32mp1_interactive.c index 38390c0d552..5a5d0670461 100644 --- a/drivers/ram/stm32mp1/stm32mp1_interactive.c +++ b/drivers/ram/stm32mp1/stm32mp1_interactive.c @@ -394,7 +394,7 @@ bool stm32mp1_ddr_interactive(void *priv, unsigned long 
start = get_timer(0); while (1) { - if (tstc() && (getc() == 'd')) { + if (tstc() && (getchar() == 'd')) { next_step = STEP_DDR_RESET; break; } diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index b60e11f98b2..33c2736554e 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -72,15 +72,14 @@ config RESET_UNIPHIER Say Y if you want to control reset signals provided by System Control block, Media I/O block, Peripheral Block. -config AST2500_RESET +config RESET_AST2500 bool "Reset controller driver for AST2500 SoCs" - depends on DM_RESET && WDT_ASPEED + depends on DM_RESET default y if ASPEED_AST2500 help - Support for reset controller on AST2500 SoC. This controller uses - watchdog to reset different peripherals and thus only supports - resets that are supported by watchdog. The main limitation though - is that some reset signals, like I2C or MISC reset multiple devices. + Support for reset controller on AST2500 SoC. + Say Y if you want to control reset signals of different peripherals + through System Control Unit (SCU). config RESET_ROCKCHIP bool "Reset controller driver for Rockchip SoCs" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 10a7973f823..fa52aa33291 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o -obj-$(CONFIG_AST2500_RESET) += ast2500-reset.o +obj-$(CONFIG_RESET_AST2500) += reset-ast2500.o obj-$(CONFIG_RESET_ROCKCHIP) += reset-rockchip.o obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o diff --git a/drivers/reset/ast2500-reset.c b/drivers/reset/ast2500-reset.c deleted file mode 100644 index beb5cd8fa8c..00000000000 --- a/drivers/reset/ast2500-reset.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2017 Google, Inc - */ - -#include <common.h> -#include <dm.h> -#include <log.h> -#include <misc.h> -#include <reset.h> -#include <reset-uclass.h> -#include <wdt.h> -#include <asm/io.h> -#include <asm/arch/scu_ast2500.h> -#include <asm/arch/wdt.h> - -struct ast2500_reset_priv { - /* WDT used to perform resets. 
*/ - struct udevice *wdt; - struct ast2500_scu *scu; -}; - -static int ast2500_ofdata_to_platdata(struct udevice *dev) -{ - struct ast2500_reset_priv *priv = dev_get_priv(dev); - int ret; - - ret = uclass_get_device_by_phandle(UCLASS_WDT, dev, "aspeed,wdt", - &priv->wdt); - if (ret) { - debug("%s: can't find WDT for reset controller", __func__); - return ret; - } - - return 0; -} - -static int ast2500_reset_assert(struct reset_ctl *reset_ctl) -{ - struct ast2500_reset_priv *priv = dev_get_priv(reset_ctl->dev); - u32 reset_mode, reset_mask; - bool reset_sdram; - int ret; - - /* - * To reset SDRAM, a specifal flag in SYSRESET register - * needs to be enabled first - */ - reset_mode = ast_reset_mode_from_flags(reset_ctl->id); - reset_mask = ast_reset_mask_from_flags(reset_ctl->id); - reset_sdram = reset_mode == WDT_CTRL_RESET_SOC && - (reset_mask & WDT_RESET_SDRAM); - - if (reset_sdram) { - ast_scu_unlock(priv->scu); - setbits_le32(&priv->scu->sysreset_ctrl1, - SCU_SYSRESET_SDRAM_WDT); - ret = wdt_expire_now(priv->wdt, reset_ctl->id); - clrbits_le32(&priv->scu->sysreset_ctrl1, - SCU_SYSRESET_SDRAM_WDT); - ast_scu_lock(priv->scu); - } else { - ret = wdt_expire_now(priv->wdt, reset_ctl->id); - } - - return ret; -} - -static int ast2500_reset_request(struct reset_ctl *reset_ctl) -{ - debug("%s(reset_ctl=%p) (dev=%p, id=%lu)\n", __func__, reset_ctl, - reset_ctl->dev, reset_ctl->id); - - return 0; -} - -static int ast2500_reset_probe(struct udevice *dev) -{ - struct ast2500_reset_priv *priv = dev_get_priv(dev); - - priv->scu = ast_get_scu(); - - return 0; -} - -static const struct udevice_id ast2500_reset_ids[] = { - { .compatible = "aspeed,ast2500-reset" }, - { } -}; - -struct reset_ops ast2500_reset_ops = { - .rst_assert = ast2500_reset_assert, - .request = ast2500_reset_request, -}; - -U_BOOT_DRIVER(ast2500_reset) = { - .name = "ast2500_reset", - .id = UCLASS_RESET, - .of_match = ast2500_reset_ids, - .probe = ast2500_reset_probe, - .ops = &ast2500_reset_ops, - .ofdata_to_platdata = ast2500_ofdata_to_platdata, - .priv_auto_alloc_size = sizeof(struct ast2500_reset_priv), -}; diff --git a/drivers/reset/reset-ast2500.c b/drivers/reset/reset-ast2500.c new file mode 100644 index 00000000000..e7b5c7decab --- /dev/null +++ b/drivers/reset/reset-ast2500.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2017 Google, Inc + * Copyright 2020 ASPEED Technology Inc. 
+ */ + +#include <common.h> +#include <dm.h> +#include <log.h> +#include <misc.h> +#include <reset.h> +#include <reset-uclass.h> +#include <linux/err.h> +#include <asm/io.h> +#include <asm/arch/scu_ast2500.h> + +struct ast2500_reset_priv { + struct ast2500_scu *scu; +}; + +static int ast2500_reset_request(struct reset_ctl *reset_ctl) +{ + debug("%s(reset_ctl=%p) (dev=%p, id=%lu)\n", __func__, reset_ctl, + reset_ctl->dev, reset_ctl->id); + + return 0; +} + +static int ast2500_reset_free(struct reset_ctl *reset_ctl) +{ + debug("%s(reset_ctl=%p) (dev=%p, id=%lu)\n", __func__, reset_ctl, + reset_ctl->dev, reset_ctl->id); + + return 0; +} + +static int ast2500_reset_assert(struct reset_ctl *reset_ctl) +{ + struct ast2500_reset_priv *priv = dev_get_priv(reset_ctl->dev); + struct ast2500_scu *scu = priv->scu; + + debug("%s: reset_ctl->id: %lu\n", __func__, reset_ctl->id); + + if (reset_ctl->id < 32) + setbits_le32(&scu->sysreset_ctrl1, BIT(reset_ctl->id)); + else + setbits_le32(&scu->sysreset_ctrl2, BIT(reset_ctl->id - 32)); + + return 0; +} + +static int ast2500_reset_deassert(struct reset_ctl *reset_ctl) +{ + struct ast2500_reset_priv *priv = dev_get_priv(reset_ctl->dev); + struct ast2500_scu *scu = priv->scu; + + debug("%s: reset_ctl->id: %lu\n", __func__, reset_ctl->id); + + if (reset_ctl->id < 32) + clrbits_le32(&scu->sysreset_ctrl1, BIT(reset_ctl->id)); + else + clrbits_le32(&scu->sysreset_ctrl2, BIT(reset_ctl->id - 32)); + + return 0; +} + +static int ast2500_reset_probe(struct udevice *dev) +{ + int rc; + struct ast2500_reset_priv *priv = dev_get_priv(dev); + struct udevice *scu_dev; + + /* get SCU base from clock device */ + rc = uclass_get_device_by_driver(UCLASS_CLK, + DM_GET_DRIVER(aspeed_ast2500_scu), &scu_dev); + if (rc) { + debug("%s: clock device not found, rc=%d\n", __func__, rc); + return rc; + } + + priv->scu = devfdt_get_addr_ptr(scu_dev); + if (IS_ERR_OR_NULL(priv->scu)) { + debug("%s: invalid SCU base pointer\n", __func__); + return PTR_ERR(priv->scu); + } + + return 0; +} + +static const struct udevice_id ast2500_reset_ids[] = { + { .compatible = "aspeed,ast2500-reset" }, + { } +}; + +struct reset_ops ast2500_reset_ops = { + .request = ast2500_reset_request, + .rfree = ast2500_reset_free, + .rst_assert = ast2500_reset_assert, + .rst_deassert = ast2500_reset_deassert, +}; + +U_BOOT_DRIVER(ast2500_reset) = { + .name = "ast2500_reset", + .id = UCLASS_RESET, + .of_match = ast2500_reset_ids, + .probe = ast2500_reset_probe, + .ops = &ast2500_reset_ops, + .priv_auto_alloc_size = sizeof(struct ast2500_reset_priv), +}; diff --git a/drivers/rng/Kconfig b/drivers/rng/Kconfig index e4b22d79ebc..11001c8ae7c 100644 --- a/drivers/rng/Kconfig +++ b/drivers/rng/Kconfig @@ -24,6 +24,13 @@ config RNG_SANDBOX Enable random number generator for sandbox. This is an emulation of a rng device. +config RNG_MSM + bool "Qualcomm SoCs Random Number Generator support" + depends on DM_RNG + help + This driver provides support for the Random Number + Generator hardware found on Qualcomm SoCs. 
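For context, a consumer of this driver only goes through the DM_RNG uclass; a minimal sketch follows (the device index and buffer size are arbitrary, not mandated by the driver):

#include <common.h>
#include <dm.h>
#include <rng.h>

static int fill_with_entropy(u8 *buf, size_t len)
{
	struct udevice *dev;
	int ret;

	/* Take the first RNG device bound in the system */
	ret = uclass_get_device(UCLASS_RNG, 0, &dev);
	if (ret)
		return ret;

	/* Dispatches to the driver's .read op, msm_rng_read() below */
	return dm_rng_read(dev, buf, len);
}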
+ config RNG_STM32MP1 bool "Enable random number generator for STM32MP1" depends on ARCH_STM32MP diff --git a/drivers/rng/Makefile b/drivers/rng/Makefile index 44a00039173..89534068820 100644 --- a/drivers/rng/Makefile +++ b/drivers/rng/Makefile @@ -6,5 +6,6 @@ obj-$(CONFIG_DM_RNG) += rng-uclass.o obj-$(CONFIG_RNG_MESON) += meson-rng.o obj-$(CONFIG_RNG_SANDBOX) += sandbox_rng.o +obj-$(CONFIG_RNG_MSM) += msm_rng.o obj-$(CONFIG_RNG_STM32MP1) += stm32mp1_rng.o obj-$(CONFIG_RNG_ROCKCHIP) += rockchip_rng.o diff --git a/drivers/rng/msm_rng.c b/drivers/rng/msm_rng.c new file mode 100644 index 00000000000..d51119303a6 --- /dev/null +++ b/drivers/rng/msm_rng.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PRNG driver for Qualcomm IPQ40xx + * + * Copyright (c) 2020 Sartura Ltd. + * + * Author: Robert Marko <robert.marko@sartura.hr> + * + * Based on Linux driver + */ + +#include <asm/io.h> +#include <clk.h> +#include <common.h> +#include <dm.h> +#include <linux/bitops.h> +#include <rng.h> + +/* Device specific register offsets */ +#define PRNG_DATA_OUT 0x0000 +#define PRNG_STATUS 0x0004 +#define PRNG_LFSR_CFG 0x0100 +#define PRNG_CONFIG 0x0104 + +/* Device specific register masks and config values */ +#define PRNG_LFSR_CFG_MASK 0x0000ffff +#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd +#define PRNG_CONFIG_HW_ENABLE BIT(1) +#define PRNG_STATUS_DATA_AVAIL BIT(0) + +#define MAX_HW_FIFO_DEPTH 16 +#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) +#define WORD_SZ 4 + +struct msm_rng_priv { + phys_addr_t base; + struct clk clk; +}; + +static int msm_rng_read(struct udevice *dev, void *data, size_t len) +{ + struct msm_rng_priv *priv = dev_get_priv(dev); + size_t currsize = 0; + u32 *retdata = data; + size_t maxsize; + u32 val; + + /* calculate max size bytes to transfer back to caller */ + maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, len); + + /* read random data from hardware */ + do { + val = readl_relaxed(priv->base + PRNG_STATUS); + if (!(val & PRNG_STATUS_DATA_AVAIL)) + break; + + val = readl_relaxed(priv->base + PRNG_DATA_OUT); + if (!val) + break; + + *retdata++ = val; + currsize += WORD_SZ; + + /* make sure we stay on 32bit boundary */ + if ((maxsize - currsize) < WORD_SZ) + break; + } while (currsize < maxsize); + + return 0; +} + +static int msm_rng_enable(struct msm_rng_priv *priv, int enable) +{ + u32 val; + + if (enable) { + /* Enable PRNG only if it is not already enabled */ + val = readl_relaxed(priv->base + PRNG_CONFIG); + if (val & PRNG_CONFIG_HW_ENABLE) { + val = readl_relaxed(priv->base + PRNG_LFSR_CFG); + val &= ~PRNG_LFSR_CFG_MASK; + val |= PRNG_LFSR_CFG_CLOCKS; + writel(val, priv->base + PRNG_LFSR_CFG); + + val = readl_relaxed(priv->base + PRNG_CONFIG); + val |= PRNG_CONFIG_HW_ENABLE; + writel(val, priv->base + PRNG_CONFIG); + } + } else { + val = readl_relaxed(priv->base + PRNG_CONFIG); + val &= ~PRNG_CONFIG_HW_ENABLE; + writel(val, priv->base + PRNG_CONFIG); + } + + return 0; +} + +static int msm_rng_probe(struct udevice *dev) +{ + struct msm_rng_priv *priv = dev_get_priv(dev); + + int ret; + + priv->base = dev_read_addr(dev); + if (priv->base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = clk_get_by_index(dev, 0, &priv->clk); + if (ret) + return ret; + + ret = clk_enable(&priv->clk); + if (ret < 0) + return ret; + + return msm_rng_enable(priv, 1); +} + +static int msm_rng_remove(struct udevice *dev) +{ + struct msm_rng_priv *priv = dev_get_priv(dev); + + return msm_rng_enable(priv, 0); +} + +static const struct dm_rng_ops msm_rng_ops = { + .read = msm_rng_read, +}; + 
+static const struct udevice_id msm_rng_match[] = { + { .compatible = "qcom,prng", }, + {}, +}; + +U_BOOT_DRIVER(msm_rng) = { + .name = "msm-rng", + .id = UCLASS_RNG, + .of_match = msm_rng_match, + .ops = &msm_rng_ops, + .probe = msm_rng_probe, + .remove = msm_rng_remove, + .priv_auto_alloc_size = sizeof(struct msm_rng_priv), +}; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 59e2fc44ba9..d06d272e14b 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -63,6 +63,17 @@ config RTC_DS3232 Support for Dallas Semiconductor (now Maxim) DS3232 compatible Real Time Clock devices. +config RTC_EMULATION + bool "Enable emulated RTC" + depends on DM_RTC + help + On a board without a hardware clock, this software real time clock can + be used. The build time is used to initialize the RTC, so you will have + to adjust the time either manually using the 'date' command or via the + 'sntp' command, which updates the RTC with the time from a network time + server. See CONFIG_CMD_SNTP and CONFIG_BOOTP_NTPSERVER. The RTC time is + advanced according to CPU ticks. + config RTC_ISL1208 bool "Enable ISL1208 driver" depends on DM_RTC @@ -75,6 +86,12 @@ config RTC_ISL1208 This driver supports reading and writing the RTC/calendar and detects total power failures. +config RTC_PCF8563 + tristate "Philips PCF8563" + help + If you say yes here you get support for the Philips PCF8563 RTC + and compatible chips. + config RTC_RV3029 bool "Enable RV3029 driver" depends on DM_RTC diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 12eb449583a..ef66dc4bf02 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_RTC_DS164x) += ds164x.o obj-$(CONFIG_RTC_DS174x) += ds174x.o obj-$(CONFIG_RTC_DS3231) += ds3231.o obj-$(CONFIG_RTC_DS3232) += ds3232.o +obj-$(CONFIG_RTC_EMULATION) += emul_rtc.o obj-$(CONFIG_RTC_FTRTC010) += ftrtc010.o obj-$(CONFIG_SANDBOX) += i2c_rtc_emul.o obj-$(CONFIG_RTC_IMXDI) += imxdi.o diff --git a/drivers/rtc/emul_rtc.c b/drivers/rtc/emul_rtc.c new file mode 100644 index 00000000000..c98c24bbb3d --- /dev/null +++ b/drivers/rtc/emul_rtc.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020, Heinrich Schuchardt <xypron.glpk@gmx.de> + * + * This driver emulates a real time clock based on timer ticks. 
+ */ + +#include <common.h> +#include <div64.h> +#include <dm.h> +#include <generated/timestamp_autogenerated.h> +#include <rtc.h> + +/** + * struct emul_rtc - private data for emulated RTC driver + */ +struct emul_rtc { + /** + * @offset_us: microseconds from 1970-01-01 to timer_get_us() base + */ + u64 offset_us; + /** + * @isdst: daylight saving time + */ + int isdst; +}; + +static int emul_rtc_get(struct udevice *dev, struct rtc_time *time) +{ + struct emul_rtc *priv = dev_get_priv(dev); + u64 now; + + if (!priv->offset_us) { + /* Use the build date as initial time */ + priv->offset_us = U_BOOT_EPOCH * 1000000ULL - timer_get_us(); + priv->isdst = -1; + } + + now = timer_get_us() + priv->offset_us; + do_div(now, 1000000); + rtc_to_tm(now, time); + time->tm_isdst = priv->isdst; + + return 0; +} + +static int emul_rtc_set(struct udevice *dev, const struct rtc_time *time) +{ + struct emul_rtc *priv = dev_get_priv(dev); + + if (time->tm_year < 1970) + return -EINVAL; + + priv->offset_us = rtc_mktime(time) * 1000000ULL - timer_get_us(); + + if (time->tm_isdst > 0) + priv->isdst = 1; + else if (time->tm_isdst < 0) + priv->isdst = -1; + else + priv->isdst = 0; + + return 0; +} + +static const struct rtc_ops emul_rtc_ops = { + .get = emul_rtc_get, + .set = emul_rtc_set, +}; + +U_BOOT_DRIVER(rtc_emul) = { + .name = "rtc_emul", + .id = UCLASS_RTC, + .ops = &emul_rtc_ops, + .priv_auto_alloc_size = sizeof(struct emul_rtc), +}; + +U_BOOT_DEVICE(rtc_emul) = { + .name = "rtc_emul", +}; diff --git a/drivers/rtc/rtc-uclass.c b/drivers/rtc/rtc-uclass.c index 8035f7fe9cc..b406bab32d1 100644 --- a/drivers/rtc/rtc-uclass.c +++ b/drivers/rtc/rtc-uclass.c @@ -174,5 +174,7 @@ int rtc_write32(struct udevice *dev, unsigned int reg, u32 value) UCLASS_DRIVER(rtc) = { .name = "rtc", .id = UCLASS_RTC, +#if !CONFIG_IS_ENABLED(OF_PLATDATA) .post_bind = dm_scan_fdt_dev, +#endif }; diff --git a/drivers/rtc/sandbox_rtc.c b/drivers/rtc/sandbox_rtc.c index 852770a49cf..d0864b1df97 100644 --- a/drivers/rtc/sandbox_rtc.c +++ b/drivers/rtc/sandbox_rtc.c @@ -92,8 +92,8 @@ static const struct udevice_id sandbox_rtc_ids[] = { { } }; -U_BOOT_DRIVER(rtc_sandbox) = { - .name = "rtc-sandbox", +U_BOOT_DRIVER(sandbox_rtc) = { + .name = "sandbox_rtc", .id = UCLASS_RTC, .of_match = sandbox_rtc_ids, .ops = &sandbox_rtc_ops, diff --git a/drivers/serial/sandbox.c b/drivers/serial/sandbox.c index f09d291e043..db2fbac6295 100644 --- a/drivers/serial/sandbox.c +++ b/drivers/serial/sandbox.c @@ -267,6 +267,7 @@ U_BOOT_DRIVER(sandbox_serial) = { .flags = DM_FLAG_PRE_RELOC, }; +#if !CONFIG_IS_ENABLED(OF_PLATDATA) static const struct sandbox_serial_platdata platdata_non_fdt = { .colour = -1, }; @@ -275,4 +276,6 @@ U_BOOT_DEVICE(serial_sandbox_non_fdt) = { .name = "sandbox_serial", .platdata = &platdata_non_fdt, }; +#endif + #endif /* CONFIG_IS_ENABLED(OF_CONTROL) */ diff --git a/drivers/serial/serial-uclass.c b/drivers/serial/serial-uclass.c index 0027625ebfd..f3c25d42167 100644 --- a/drivers/serial/serial-uclass.c +++ b/drivers/serial/serial-uclass.c @@ -413,7 +413,7 @@ static int on_baudrate(const char *name, const char *value, enum env_op op, if ((flags & H_INTERACTIVE) != 0) while (1) { - if (getc() == '\r') + if (getchar() == '\r') break; } diff --git a/drivers/serial/serial.c b/drivers/serial/serial.c index 53358acb81f..355659ba056 100644 --- a/drivers/serial/serial.c +++ b/drivers/serial/serial.c @@ -90,7 +90,7 @@ static int on_baudrate(const char *name, const char *value, enum env_op op, if ((flags & H_INTERACTIVE) != 0) while (1) { - if 
(getc() == '\r') + if (getchar() == '\r') break; } diff --git a/drivers/serial/serial_pl01x.c b/drivers/serial/serial_pl01x.c index 2772c25f1d2..d9e35c6a2b4 100644 --- a/drivers/serial/serial_pl01x.c +++ b/drivers/serial/serial_pl01x.c @@ -19,6 +19,7 @@ #include <watchdog.h> #include <asm/io.h> #include <serial.h> +#include <dm/device_compat.h> #include <dm/platform_data/serial_pl01x.h> #include <linux/compiler.h> #include "serial_pl01x_internal.h" @@ -362,8 +363,18 @@ int pl01x_serial_ofdata_to_platdata(struct udevice *dev) plat->clock = dev_read_u32_default(dev, "clock", CONFIG_PL011_CLOCK); ret = clk_get_by_index(dev, 0, &clk); if (!ret) { - clk_enable(&clk); + ret = clk_enable(&clk); + if (ret && ret != -ENOSYS) { + dev_err(dev, "failed to enable clock\n"); + return ret; + } + plat->clock = clk_get_rate(&clk); + if (IS_ERR_VALUE(plat->clock)) { + dev_err(dev, "failed to get rate\n"); + return plat->clock; + } + debug("%s: CLK %d\n", __func__, plat->clock); } plat->type = dev_get_driver_data(dev); plat->skip_init = dev_read_bool(dev, "skip-init"); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 5df97c80fa5..f7a98525655 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -271,6 +271,16 @@ config PL022_SPI controller. If you have an embedded system with an AMBA(R) bus and a PL022 controller, say Y or M here. +config SPI_QUP + bool "Qualcomm SPI controller with QUP interface" + depends on ARCH_IPQ40XX + help + Qualcomm Universal Peripheral (QUP) core is an AHB slave that + provides a common data path (an output FIFO and an input FIFO) + for serial peripheral interface (SPI) mini-core. SPI in master + mode supports up to 50MHz, up to four chip selects, programmable + data path from 4 bits to 32 bits and numerous protocol variants. 
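A hedged sketch of how a client would drive such a controller through the DM SPI uclass follows; the bus/CS numbers, speed, and payload bytes are placeholders, and "spi_generic_drv" is the generic slave driver U-Boot binds when no specific one is given:

#include <common.h>
#include <dm.h>
#include <spi.h>

static int qup_demo_xfer(void)
{
	struct spi_slave *slave;
	struct udevice *dev;
	u8 dout[2] = { 0x9f, 0x00 };	/* placeholder payload */
	u8 din[2];
	int ret;

	/* Bus 0, CS 0, 1 MHz, mode 0 -- all illustrative */
	ret = spi_get_bus_and_cs(0, 0, 1000000, SPI_MODE_0,
				 "spi_generic_drv", "generic_0:0",
				 &dev, &slave);
	if (ret)
		return ret;

	ret = spi_claim_bus(slave);
	if (ret)
		return ret;

	/* bitlen is in bits; BEGIN/END bracket the chip select */
	ret = spi_xfer(slave, sizeof(dout) * 8, dout, din,
		       SPI_XFER_BEGIN | SPI_XFER_END);
	spi_release_bus(slave);

	return ret;
}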
+ config RENESAS_RPC_SPI bool "Renesas RPC SPI driver" depends on RCAR_GEN3 || RZA1 diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index b5c9ff1af82..d9b5bd9b794 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_OCTEON_SPI) += octeon_spi.o obj-$(CONFIG_OMAP3_SPI) += omap3_spi.o obj-$(CONFIG_PIC32_SPI) += pic32_spi.o obj-$(CONFIG_PL022_SPI) += pl022_spi.o +obj-$(CONFIG_SPI_QUP) += spi-qup.o obj-$(CONFIG_RENESAS_RPC_SPI) += renesas_rpc_spi.o obj-$(CONFIG_ROCKCHIP_SPI) += rk_spi.o obj-$(CONFIG_SANDBOX_SPI) += sandbox_spi.o diff --git a/drivers/spi/fsl_qspi.c b/drivers/spi/fsl_qspi.c index eec968e5ec1..128f95877f2 100644 --- a/drivers/spi/fsl_qspi.c +++ b/drivers/spi/fsl_qspi.c @@ -24,19 +24,20 @@ */ #include <common.h> +#include <dm.h> +#include <dm/device_compat.h> #include <log.h> -#include <asm/io.h> +#include <spi.h> +#include <spi-mem.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/libfdt.h> #include <linux/sizes.h> #include <linux/iopoll.h> -#include <dm.h> #include <linux/iopoll.h> #include <linux/sizes.h> #include <linux/err.h> -#include <spi.h> -#include <spi-mem.h> +#include <asm/io.h> DECLARE_GLOBAL_DATA_PTR; diff --git a/drivers/spi/ich.c b/drivers/spi/ich.c index e1336b89c5a..a91cb785680 100644 --- a/drivers/spi/ich.c +++ b/drivers/spi/ich.c @@ -615,6 +615,7 @@ static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op) return ret; } +#if !CONFIG_IS_ENABLED(OF_PLATDATA) /** * ich_spi_get_basics() - Get basic information about the ICH device * @@ -657,6 +658,7 @@ static int ich_spi_get_basics(struct udevice *bus, bool can_probe, return ret; } +#endif /** * ich_get_mmap_bus() - Handle the get_mmap() method for a bus @@ -946,10 +948,10 @@ static int ich_spi_child_pre_probe(struct udevice *dev) static int ich_spi_ofdata_to_platdata(struct udevice *dev) { struct ich_spi_platdata *plat = dev_get_platdata(dev); - int ret; #if !CONFIG_IS_ENABLED(OF_PLATDATA) struct ich_spi_priv *priv = dev_get_priv(dev); + int ret; ret = ich_spi_get_basics(dev, true, &priv->pch, &plat->ich_version, &plat->mmio_base); diff --git a/drivers/spi/mvebu_a3700_spi.c b/drivers/spi/mvebu_a3700_spi.c index e860b9ec64b..eb13cf349ef 100644 --- a/drivers/spi/mvebu_a3700_spi.c +++ b/drivers/spi/mvebu_a3700_spi.c @@ -15,6 +15,7 @@ #include <asm/io.h> #include <dm/device_compat.h> #include <linux/bitops.h> +#include <asm/gpio.h> DECLARE_GLOBAL_DATA_PTR; @@ -27,6 +28,7 @@ DECLARE_GLOBAL_DATA_PTR; #define MVEBU_SPI_A3700_SPI_EN_0 BIT(16) #define MVEBU_SPI_A3700_CLK_PRESCALE_MASK 0x1f +#define MAX_CS_COUNT 4 /* SPI registers */ struct spi_reg { @@ -39,16 +41,23 @@ struct spi_reg { struct mvebu_spi_platdata { struct spi_reg *spireg; struct clk clk; + struct gpio_desc cs_gpios[MAX_CS_COUNT]; }; -static void spi_cs_activate(struct spi_reg *reg, int cs) +static void spi_cs_activate(struct mvebu_spi_platdata *plat, int cs) { - setbits_le32(®->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs); + if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs])) + dm_gpio_set_value(&plat->cs_gpios[cs], 1); + else + setbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs); } -static void spi_cs_deactivate(struct spi_reg *reg, int cs) +static void spi_cs_deactivate(struct mvebu_spi_platdata *plat, int cs) { - clrbits_le32(®->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs); + if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs])) + dm_gpio_set_value(&plat->cs_gpios[cs], 0); + else + clrbits_le32(&plat->spireg->ctrl, 
MVEBU_SPI_A3700_SPI_EN_0 << cs); } /** @@ -150,7 +159,7 @@ static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen, /* Activate CS */ if (flags & SPI_XFER_BEGIN) { debug("SPI: activate cs.\n"); - spi_cs_activate(reg, spi_chip_select(dev)); + spi_cs_activate(plat, spi_chip_select(dev)); } /* Send and/or receive */ @@ -169,7 +178,7 @@ static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen, return ret; debug("SPI: deactivate cs.\n"); - spi_cs_deactivate(reg, spi_chip_select(dev)); + spi_cs_deactivate(plat, spi_chip_select(dev)); } return 0; @@ -247,6 +256,26 @@ static int mvebu_spi_probe(struct udevice *bus) writel(data, ®->cfg); + /* Set up CS GPIOs in device tree, if any */ + if (CONFIG_IS_ENABLED(DM_GPIO) && gpio_get_list_count(bus, "cs-gpios") > 0) { + int i; + + for (i = 0; i < ARRAY_SIZE(plat->cs_gpios); i++) { + ret = gpio_request_by_name(bus, "cs-gpios", i, &plat->cs_gpios[i], 0); + if (ret < 0 || !dm_gpio_is_valid(&plat->cs_gpios[i])) { + /* Use the native CS function for this line */ + continue; + } + + ret = dm_gpio_set_dir_flags(&plat->cs_gpios[i], + GPIOD_IS_OUT | GPIOD_ACTIVE_LOW); + if (ret) { + dev_err(bus, "Setting cs %d error\n", i); + return ret; + } + } + } + return 0; } diff --git a/drivers/spi/nxp_fspi.c b/drivers/spi/nxp_fspi.c index c507437f2e7..9661e9e10d7 100644 --- a/drivers/spi/nxp_fspi.c +++ b/drivers/spi/nxp_fspi.c @@ -34,12 +34,13 @@ */ #include <common.h> -#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> #include <malloc.h> #include <spi.h> #include <spi-mem.h> -#include <dm.h> -#include <clk.h> +#include <asm/io.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/sizes.h> @@ -520,7 +521,7 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR); } -#if CONFIG_IS_ENABLED(CONFIG_CLK) +#if CONFIG_IS_ENABLED(CLK) static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) { int ret; @@ -808,7 +809,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) int ret, i; u32 reg; -#if CONFIG_IS_ENABLED(CONFIG_CLK) +#if CONFIG_IS_ENABLED(CLK) /* disable and unprepare clock to avoid glitch pass to controller */ nxp_fspi_clk_disable_unprep(f); @@ -898,7 +899,7 @@ static int nxp_fspi_claim_bus(struct udevice *dev) static int nxp_fspi_set_speed(struct udevice *bus, uint speed) { -#if CONFIG_IS_ENABLED(CONFIG_CLK) +#if CONFIG_IS_ENABLED(CLK) struct nxp_fspi *f = dev_get_priv(bus); int ret; @@ -924,7 +925,7 @@ static int nxp_fspi_set_mode(struct udevice *bus, uint mode) static int nxp_fspi_ofdata_to_platdata(struct udevice *bus) { struct nxp_fspi *f = dev_get_priv(bus); -#if CONFIG_IS_ENABLED(CONFIG_CLK) +#if CONFIG_IS_ENABLED(CLK) int ret; #endif @@ -950,7 +951,7 @@ static int nxp_fspi_ofdata_to_platdata(struct udevice *bus) f->ahb_addr = map_physmem(ahb_addr, ahb_size, MAP_NOCACHE); f->memmap_phy_size = ahb_size; -#if CONFIG_IS_ENABLED(CONFIG_CLK) +#if CONFIG_IS_ENABLED(CLK) ret = clk_get_by_name(bus, "fspi_en", &f->clk_en); if (ret) { dev_err(bus, "failed to get fspi_en clock\n"); diff --git a/drivers/spi/octeon_spi.c b/drivers/spi/octeon_spi.c index 83fe6330a12..7e88e5580f7 100644 --- a/drivers/spi/octeon_spi.c +++ b/drivers/spi/octeon_spi.c @@ -519,7 +519,10 @@ static int octeon_spi_set_speed(struct udevice *bus, uint max_hz) if (max_hz > OCTEON_SPI_MAX_CLOCK_HZ) max_hz = OCTEON_SPI_MAX_CLOCK_HZ; - clk_rate = clk_get_rate(&priv->clk); + if (device_is_compatible(bus, "cavium,thunderx-spi")) + clk_rate = 100000000; + else + clk_rate = 
clk_get_rate(&priv->clk); if (IS_ERR_VALUE(clk_rate)) return -EINVAL; diff --git a/drivers/spi/renesas_rpc_spi.c b/drivers/spi/renesas_rpc_spi.c index 3ea59b8fb82..d0ff918af88 100644 --- a/drivers/spi/renesas_rpc_spi.c +++ b/drivers/spi/renesas_rpc_spi.c @@ -448,12 +448,13 @@ static const struct dm_spi_ops rpc_spi_ops = { }; static const struct udevice_id rpc_spi_ids[] = { + { .compatible = "renesas,rpc-r7s72100" }, { .compatible = "renesas,rpc-r8a7795" }, { .compatible = "renesas,rpc-r8a7796" }, { .compatible = "renesas,rpc-r8a77965" }, { .compatible = "renesas,rpc-r8a77970" }, { .compatible = "renesas,rpc-r8a77995" }, - { .compatible = "renesas,rpc-r7s72100" }, + { .compatible = "renesas,rcar-gen3-rpc" }, { } }; diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c new file mode 100644 index 00000000000..6f8df55fa56 --- /dev/null +++ b/drivers/spi/spi-qup.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Qualcomm QUP SPI controller + * FIFO and Block modes supported, no DMA + * mode support + * + * Copyright (c) 2020 Sartura Ltd. + * + * Author: Robert Marko <robert.marko@sartura.hr> + * Author: Luka Kovacic <luka.kovacic@sartura.hr> + * + * Based on stock U-boot and Linux drivers + */ + +#include <asm/gpio.h> +#include <asm/io.h> +#include <clk.h> +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <linux/delay.h> +#include <spi.h> + +#define QUP_CONFIG 0x0000 +#define QUP_STATE 0x0004 +#define QUP_IO_M_MODES 0x0008 +#define QUP_SW_RESET 0x000c +#define QUP_OPERATIONAL 0x0018 +#define QUP_ERROR_FLAGS 0x001c +#define QUP_ERROR_FLAGS_EN 0x0020 +#define QUP_OPERATIONAL_MASK 0x0028 +#define QUP_HW_VERSION 0x0030 +#define QUP_MX_OUTPUT_CNT 0x0100 +#define QUP_OUTPUT_FIFO 0x0110 +#define QUP_MX_WRITE_CNT 0x0150 +#define QUP_MX_INPUT_CNT 0x0200 +#define QUP_MX_READ_CNT 0x0208 +#define QUP_INPUT_FIFO 0x0218 + +#define SPI_CONFIG 0x0300 +#define SPI_IO_CONTROL 0x0304 +#define SPI_ERROR_FLAGS 0x0308 +#define SPI_ERROR_FLAGS_EN 0x030c + +/* QUP_CONFIG fields */ +#define QUP_CONFIG_SPI_MODE BIT(8) +#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13) +#define QUP_CONFIG_NO_INPUT BIT(7) +#define QUP_CONFIG_NO_OUTPUT BIT(6) +#define QUP_CONFIG_N 0x001f + +/* QUP_STATE fields */ +#define QUP_STATE_VALID BIT(2) +#define QUP_STATE_RESET 0 +#define QUP_STATE_RUN 1 +#define QUP_STATE_PAUSE 3 +#define QUP_STATE_MASK 3 +#define QUP_STATE_CLEAR 2 + +/* QUP_IO_M_MODES fields */ +#define QUP_IO_M_PACK_EN BIT(15) +#define QUP_IO_M_UNPACK_EN BIT(14) +#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12 +#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10 +#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT) +#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT) + +#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0) +#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2) +#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5) +#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7) + +#define QUP_IO_M_MODE_FIFO 0 +#define QUP_IO_M_MODE_BLOCK 1 +#define QUP_IO_M_MODE_DMOV 2 +#define QUP_IO_M_MODE_BAM 3 + +/* QUP_OPERATIONAL fields */ +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13) +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12) +#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11) +#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10) +#define QUP_OP_IN_SERVICE_FLAG BIT(9) +#define QUP_OP_OUT_SERVICE_FLAG BIT(8) +#define QUP_OP_IN_FIFO_FULL BIT(7) +#define QUP_OP_OUT_FIFO_FULL BIT(6) +#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5) +#define 
QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4) + +/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */ +#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5) +#define QUP_ERROR_INPUT_UNDER_RUN BIT(4) +#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3) +#define QUP_ERROR_INPUT_OVER_RUN BIT(2) + +/* SPI_CONFIG fields */ +#define SPI_CONFIG_HS_MODE BIT(10) +#define SPI_CONFIG_INPUT_FIRST BIT(9) +#define SPI_CONFIG_LOOPBACK BIT(8) + +/* SPI_IO_CONTROL fields */ +#define SPI_IO_C_FORCE_CS BIT(11) +#define SPI_IO_C_CLK_IDLE_HIGH BIT(10) +#define SPI_IO_C_MX_CS_MODE BIT(8) +#define SPI_IO_C_CS_N_POLARITY_0 BIT(4) +#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2) +#define SPI_IO_C_CS_SELECT_MASK 0x000c +#define SPI_IO_C_TRISTATE_CS BIT(1) +#define SPI_IO_C_NO_TRI_STATE BIT(0) + +/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */ +#define SPI_ERROR_CLK_OVER_RUN BIT(1) +#define SPI_ERROR_CLK_UNDER_RUN BIT(0) + +#define SPI_NUM_CHIPSELECTS 4 + +#define SPI_DELAY_THRESHOLD 1 +#define SPI_DELAY_RETRY 10 + +#define SPI_RESET_STATE 0 +#define SPI_RUN_STATE 1 +#define SPI_CORE_RESET 0 +#define SPI_CORE_RUNNING 1 + +#define DUMMY_DATA_VAL 0 +#define TIMEOUT_CNT 100 + +#define QUP_STATE_VALID_BIT 2 +#define QUP_CONFIG_MINI_CORE_MSK (0x0F << 8) +#define QUP_CONFIG_MINI_CORE_SPI BIT(8) +#define QUP_CONF_INPUT_MSK BIT(7) +#define QUP_CONF_INPUT_ENA (0 << 7) +#define QUP_CONF_NO_INPUT BIT(7) +#define QUP_CONF_OUTPUT_MSK BIT(6) +#define QUP_CONF_OUTPUT_ENA (0 << 6) +#define QUP_CONF_NO_OUTPUT BIT(6) +#define QUP_STATE_RUN_STATE 0x1 +#define QUP_STATE_RESET_STATE 0x0 +#define QUP_STATE_PAUSE_STATE 0x3 +#define SPI_BIT_WORD_MSK 0x1F +#define SPI_8_BIT_WORD 0x07 +#define LOOP_BACK_MSK BIT(8) +#define NO_LOOP_BACK (0 << 8) +#define SLAVE_OPERATION_MSK BIT(5) +#define SLAVE_OPERATION (0 << 5) +#define CLK_ALWAYS_ON (0 << 9) +#define MX_CS_MODE BIT(8) +#define CS_POLARITY_MASK BIT(4) +#define NO_TRI_STATE BIT(0) +#define FORCE_CS_MSK BIT(11) +#define FORCE_CS_EN BIT(11) +#define FORCE_CS_DIS (0 << 11) +#define OUTPUT_BIT_SHIFT_MSK BIT(16) +#define OUTPUT_BIT_SHIFT_EN BIT(16) +#define INPUT_BLOCK_MODE_MSK (0x03 << 12) +#define INPUT_BLOCK_MODE (0x01 << 12) +#define OUTPUT_BLOCK_MODE_MSK (0x03 << 10) +#define OUTPUT_BLOCK_MODE (0x01 << 10) +#define INPUT_BAM_MODE (0x3 << 12) +#define OUTPUT_BAM_MODE (0x3 << 10) +#define PACK_EN (0x1 << 15) +#define UNPACK_EN (0x1 << 14) +#define PACK_EN_MSK (0x1 << 15) +#define UNPACK_EN_MSK (0x1 << 14) +#define OUTPUT_SERVICE_MSK (0x1 << 8) +#define INPUT_SERVICE_MSK (0x1 << 9) +#define OUTPUT_SERVICE_DIS (0x1 << 8) +#define INPUT_SERVICE_DIS (0x1 << 9) +#define BLSP0_SPI_DEASSERT_WAIT_REG 0x0310 +#define QUP_DATA_AVAILABLE_FOR_READ BIT(5) +#define SPI_INPUT_BLOCK_SIZE 4 +#define SPI_OUTPUT_BLOCK_SIZE 4 +#define SPI_BITLEN_MSK 0x07 +#define MAX_COUNT_SIZE 0xffff + +struct qup_spi_priv { + phys_addr_t base; + struct clk clk; + u32 num_cs; + struct gpio_desc cs_gpios[SPI_NUM_CHIPSELECTS]; + bool cs_high; + u32 core_state; +}; + +static int qup_spi_set_cs(struct udevice *dev, unsigned int cs, bool enable) +{ + struct qup_spi_priv *priv = dev_get_priv(dev); + + debug("%s: cs=%d enable=%d\n", __func__, cs, enable); + + if (cs >= SPI_NUM_CHIPSELECTS) + return -ENODEV; + + if (!dm_gpio_is_valid(&priv->cs_gpios[cs])) + return -EINVAL; + + if (priv->cs_high) + enable = !enable; + + return dm_gpio_set_value(&priv->cs_gpios[cs], enable ? 
1 : 0); +} + +/* + * Function to write data to OUTPUT FIFO + */ +static void qup_spi_write_byte(struct udevice *dev, unsigned char data) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + /* Wait for space in the FIFO */ + while ((readl(priv->base + QUP_OPERATIONAL) & QUP_OP_OUT_FIFO_FULL)) + udelay(1); + + /* Write the byte of data */ + writel(data, priv->base + QUP_OUTPUT_FIFO); +} + +/* + * Function to read data from Input FIFO + */ +static unsigned char qup_spi_read_byte(struct udevice *dev) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + /* Wait for Data in FIFO */ + while (!(readl(priv->base + QUP_OPERATIONAL) & QUP_DATA_AVAILABLE_FOR_READ)) { + printf("Stuck at FIFO data wait\n"); + udelay(1); + } + + /* Read a byte of data */ + return readl(priv->base + QUP_INPUT_FIFO) & 0xff; +} + +/* + * Function to check wheather Input or Output FIFO + * has data to be serviced + */ +static int qup_spi_check_fifo_status(struct udevice *dev, u32 reg_addr) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + unsigned int count = TIMEOUT_CNT; + unsigned int status_flag; + unsigned int val; + + do { + val = readl(priv->base + reg_addr); + count--; + if (count == 0) + return -ETIMEDOUT; + + status_flag = ((val & QUP_OP_OUT_SERVICE_FLAG) | (val & QUP_OP_IN_SERVICE_FLAG)); + } while (!status_flag); + + return 0; +} + +/* + * Function to configure Input and Output enable/disable + */ +static void qup_spi_enable_io_config(struct udevice *dev, u32 write_cnt, u32 read_cnt) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + + if (write_cnt) { + clrsetbits_le32(priv->base + QUP_CONFIG, + QUP_CONF_OUTPUT_MSK, QUP_CONF_OUTPUT_ENA); + } else { + clrsetbits_le32(priv->base + QUP_CONFIG, + QUP_CONF_OUTPUT_MSK, QUP_CONF_NO_OUTPUT); + } + + if (read_cnt) { + clrsetbits_le32(priv->base + QUP_CONFIG, + QUP_CONF_INPUT_MSK, QUP_CONF_INPUT_ENA); + } else { + clrsetbits_le32(priv->base + QUP_CONFIG, + QUP_CONF_INPUT_MSK, QUP_CONF_NO_INPUT); + } +} + +static int check_bit_state(struct udevice *dev, u32 reg_addr, int bit_num, int val, + int us_delay) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + unsigned int count = TIMEOUT_CNT; + unsigned int bit_val = ((readl(priv->base + reg_addr) >> bit_num) & 0x01); + + while (bit_val != val) { + count--; + if (count == 0) + return -ETIMEDOUT; + udelay(us_delay); + bit_val = ((readl(priv->base + reg_addr) >> bit_num) & 0x01); + } + + return 0; +} + +/* + * Check whether QUPn State is valid + */ +static int check_qup_state_valid(struct udevice *dev) +{ + return check_bit_state(dev, QUP_STATE, QUP_STATE_VALID, 1, 1); +} + +/* + * Configure QUPn Core state + */ +static int qup_spi_config_spi_state(struct udevice *dev, unsigned int state) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + u32 val; + int ret; + + ret = check_qup_state_valid(dev); + if (ret != 0) + return ret; + + switch (state) { + case SPI_RUN_STATE: + /* Set the state to RUN */ + val = ((readl(priv->base + QUP_STATE) & ~QUP_STATE_MASK) + | QUP_STATE_RUN); + writel(val, priv->base + QUP_STATE); + ret = check_qup_state_valid(dev); + if (ret != 0) + return ret; + priv->core_state = SPI_CORE_RUNNING; + break; + case SPI_RESET_STATE: + /* Set the state to RESET */ + val = ((readl(priv->base + QUP_STATE) & ~QUP_STATE_MASK) + | 
QUP_STATE_RESET); + writel(val, priv->base + QUP_STATE); + ret = check_qup_state_valid(dev); + if (ret != 0) + return ret; + priv->core_state = SPI_CORE_RESET; + break; + default: + printf("Unsupported QUP SPI state: %d\n", state); + ret = -EINVAL; + break; + } + return ret; +} + +/* + * Function to read 'bytes' bytes of data from the Input FIFO + */ +static int __qup_spi_blsp_spi_read(struct udevice *dev, u8 *data_buffer, unsigned int bytes) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + u32 val; + unsigned int i; + unsigned int read_bytes = bytes; + unsigned int fifo_count; + int ret = 0; + int state_config; + + /* Configure the number of bytes to read */ + state_config = qup_spi_config_spi_state(dev, SPI_RESET_STATE); + if (state_config) + return state_config; + + /* Configure input and output enable */ + qup_spi_enable_io_config(dev, 0, read_bytes); + + writel(bytes, priv->base + QUP_MX_INPUT_CNT); + + state_config = qup_spi_config_spi_state(dev, SPI_RUN_STATE); + if (state_config) + return state_config; + + while (read_bytes) { + ret = qup_spi_check_fifo_status(dev, QUP_OPERATIONAL); + if (ret != 0) + goto out; + + val = readl(priv->base + QUP_OPERATIONAL); + if (val & QUP_OP_IN_SERVICE_FLAG) { + /* + * acknowledge to hw that software will + * read input data + */ + val &= QUP_OP_IN_SERVICE_FLAG; + writel(val, priv->base + QUP_OPERATIONAL); + + fifo_count = ((read_bytes > SPI_INPUT_BLOCK_SIZE) ? + SPI_INPUT_BLOCK_SIZE : read_bytes); + + for (i = 0; i < fifo_count; i++) { + *data_buffer = qup_spi_read_byte(dev); + data_buffer++; + read_bytes--; + } + } + } + +out: + /* + * Put the SPI Core back in the Reset State + * to end the transfer + */ + (void)qup_spi_config_spi_state(dev, SPI_RESET_STATE); + + return ret; +} + +static int qup_spi_blsp_spi_read(struct udevice *dev, u8 *data_buffer, unsigned int bytes) +{ + int length, ret; + + while (bytes) { + length = (bytes < MAX_COUNT_SIZE) ? bytes : MAX_COUNT_SIZE; + + ret = __qup_spi_blsp_spi_read(dev, data_buffer, length); + if (ret != 0) + return ret; + + data_buffer += length; + bytes -= length; + } + + return 0; +} + +/* + * Function to write data to the Output FIFO + */ +static int __qup_blsp_spi_write(struct udevice *dev, const u8 *cmd_buffer, unsigned int bytes) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + u32 val; + unsigned int i; + unsigned int write_len = bytes; + unsigned int read_len = bytes; + unsigned int fifo_count; + int ret = 0; + int state_config; + + state_config = qup_spi_config_spi_state(dev, SPI_RESET_STATE); + if (state_config) + return state_config; + + writel(bytes, priv->base + QUP_MX_OUTPUT_CNT); + writel(bytes, priv->base + QUP_MX_INPUT_CNT); + state_config = qup_spi_config_spi_state(dev, SPI_RUN_STATE); + if (state_config) + return state_config; + + /* Configure input and output enable */ + qup_spi_enable_io_config(dev, write_len, read_len); + + /* + * read_len is tracked as well so that the dummy data clocked in by + * the write gets drained; in a write-then-read (WR-RD) transaction + * this ensures the actual data is returned on the subsequent read + * cycle + */ + while (write_len || read_len) { + ret = qup_spi_check_fifo_status(dev, QUP_OPERATIONAL); + if (ret != 0) + goto out; + + val = readl(priv->base + QUP_OPERATIONAL); + if (val & QUP_OP_OUT_SERVICE_FLAG) { + /* + * acknowledge to hw that software will write + * expected output data + */ + val &= QUP_OP_OUT_SERVICE_FLAG; + writel(val, priv->base + QUP_OPERATIONAL); + + if (write_len > SPI_OUTPUT_BLOCK_SIZE) + fifo_count = SPI_OUTPUT_BLOCK_SIZE; + else + fifo_count = write_len; + + for (i = 0; i < fifo_count; i++) { + /* Write actual data to output FIFO */ + qup_spi_write_byte(dev, *cmd_buffer); + cmd_buffer++; + write_len--; + } + } + if (val & QUP_OP_IN_SERVICE_FLAG) { + /* + * acknowledge to hw that software + * will read input data + */ + val &= QUP_OP_IN_SERVICE_FLAG; + writel(val, priv->base + QUP_OPERATIONAL); + + if (read_len > SPI_INPUT_BLOCK_SIZE) + fifo_count = SPI_INPUT_BLOCK_SIZE; + else + fifo_count = read_len; + + for (i = 0; i < fifo_count; i++) { + /* Read dummy data for the data written */ + (void)qup_spi_read_byte(dev); + + /* Decrement the read count after draining + * the dummy data that the write clocked in, + * so the FIFO is drained before the next + * block is written + */ + read_len--; + } + } + } +out: + /* + * Put the SPI Core back in the Reset State + * to end the transfer + */ + (void)qup_spi_config_spi_state(dev, SPI_RESET_STATE); + + return ret; +} + +static int qup_spi_blsp_spi_write(struct udevice *dev, const u8 *cmd_buffer, unsigned int bytes) +{ + int length, ret; + + while (bytes) { + length = (bytes < MAX_COUNT_SIZE) ? bytes : MAX_COUNT_SIZE; + + ret = __qup_blsp_spi_write(dev, cmd_buffer, length); + if (ret != 0) + return ret; + + cmd_buffer += length; + bytes -= length; + } + + return 0; +} + +static int qup_spi_set_speed(struct udevice *dev, uint speed) +{ + return 0; +} + +static int qup_spi_set_mode(struct udevice *dev, uint mode) +{ + struct qup_spi_priv *priv = dev_get_priv(dev); + unsigned int clk_idle_state; + unsigned int input_first_mode; + u32 val; + + switch (mode) { + case SPI_MODE_0: + clk_idle_state = 0; + input_first_mode = SPI_CONFIG_INPUT_FIRST; + break; + case SPI_MODE_1: + clk_idle_state = 0; + input_first_mode = 0; + break; + case SPI_MODE_2: + clk_idle_state = 1; + input_first_mode = SPI_CONFIG_INPUT_FIRST; + break; + case SPI_MODE_3: + clk_idle_state = 1; + input_first_mode = 0; + break; + default: + printf("Unsupported spi mode: %d\n", mode); + return -EINVAL; + } + + if (mode & SPI_CS_HIGH) + priv->cs_high = true; + else + priv->cs_high = false; + + val = readl(priv->base + SPI_CONFIG); + val |= input_first_mode; + writel(val, priv->base + SPI_CONFIG); + + val = readl(priv->base + SPI_IO_CONTROL); + if (clk_idle_state) + val |= SPI_IO_C_CLK_IDLE_HIGH; + else + val &= ~SPI_IO_C_CLK_IDLE_HIGH; + + writel(val, priv->base + SPI_IO_CONTROL); + + return 0; +} + +static void qup_spi_reset(struct udevice *dev) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + + /* Driver may not be probed yet */ + if (!priv) + return; + + writel(0x1, priv->base + QUP_SW_RESET); + udelay(5); +} + +static int qup_spi_hw_init(struct udevice *dev) +{ + struct udevice *bus = dev_get_parent(dev); + struct qup_spi_priv *priv = dev_get_priv(bus); + int ret; + + /* QUPn module configuration */ + qup_spi_reset(dev); + + /* Set the QUPn state */ + ret = qup_spi_config_spi_state(dev, 
SPI_RESET_STATE); + if (ret) + return ret; + + /* + * Configure Mini core to SPI core with Input Output enabled, + * SPI master, N = 8 bits + */ + clrsetbits_le32(priv->base + QUP_CONFIG, (QUP_CONFIG_MINI_CORE_MSK | + QUP_CONF_INPUT_MSK | + QUP_CONF_OUTPUT_MSK | + SPI_BIT_WORD_MSK), + (QUP_CONFIG_MINI_CORE_SPI | + QUP_CONF_INPUT_ENA | + QUP_CONF_OUTPUT_ENA | + SPI_8_BIT_WORD)); + + /* + * Configure Input first SPI protocol, + * SPI master mode and no loopback + */ + clrsetbits_le32(priv->base + SPI_CONFIG, (LOOP_BACK_MSK | + SLAVE_OPERATION_MSK), + (NO_LOOP_BACK | + SLAVE_OPERATION)); + + /* + * Configure SPI IO Control Register + * CLK_ALWAYS_ON = 0 + * MX_CS_MODE = 0 + * NO_TRI_STATE = 1 + */ + writel((CLK_ALWAYS_ON | NO_TRI_STATE), priv->base + SPI_IO_CONTROL); + + /* + * Configure SPI IO Modes. + * OUTPUT_BIT_SHIFT_EN = 1 + * INPUT_MODE = Block Mode + * OUTPUT MODE = Block Mode + */ + + clrsetbits_le32(priv->base + QUP_IO_M_MODES, (OUTPUT_BIT_SHIFT_MSK | + INPUT_BLOCK_MODE_MSK | + OUTPUT_BLOCK_MODE_MSK), + (OUTPUT_BIT_SHIFT_EN | + INPUT_BLOCK_MODE | + OUTPUT_BLOCK_MODE)); + + /* Disable Error mask */ + writel(0, priv->base + SPI_ERROR_FLAGS_EN); + writel(0, priv->base + QUP_ERROR_FLAGS_EN); + writel(0, priv->base + BLSP0_SPI_DEASSERT_WAIT_REG); + + return ret; +} + +static int qup_spi_claim_bus(struct udevice *dev) +{ + int ret; + + ret = qup_spi_hw_init(dev); + if (ret) + return -EIO; + + return 0; +} + +static int qup_spi_release_bus(struct udevice *dev) +{ + /* Reset the SPI hardware */ + qup_spi_reset(dev); + + return 0; +} + +static int qup_spi_xfer(struct udevice *dev, unsigned int bitlen, + const void *dout, void *din, unsigned long flags) +{ + struct udevice *bus = dev_get_parent(dev); + struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); + unsigned int len; + const u8 *txp = dout; + u8 *rxp = din; + int ret = 0; + + if (bitlen & SPI_BITLEN_MSK) { + printf("Invalid bit length\n"); + return -EINVAL; + } + + len = bitlen >> 3; + + if (flags & SPI_XFER_BEGIN) { + ret = qup_spi_hw_init(dev); + if (ret != 0) + return ret; + + ret = qup_spi_set_cs(bus, slave_plat->cs, false); + if (ret != 0) + return ret; + } + + if (dout != NULL) { + ret = qup_spi_blsp_spi_write(dev, txp, len); + if (ret != 0) + return ret; + } + + if (din != NULL) { + ret = qup_spi_blsp_spi_read(dev, rxp, len); + if (ret != 0) + return ret; + } + + if (flags & SPI_XFER_END) { + ret = qup_spi_set_cs(bus, slave_plat->cs, true); + if (ret != 0) + return ret; + } + + return ret; +} + +static int qup_spi_probe(struct udevice *dev) +{ + struct qup_spi_priv *priv = dev_get_priv(dev); + int ret; + + priv->base = dev_read_addr(dev); + if (priv->base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = clk_get_by_index(dev, 0, &priv->clk); + if (ret) + return ret; + + ret = clk_enable(&priv->clk); + if (ret < 0) + return ret; + + priv->num_cs = dev_read_u32_default(dev, "num-cs", 1); + + ret = gpio_request_list_by_name(dev, "cs-gpios", priv->cs_gpios, + priv->num_cs, GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); + if (ret < 0) { + printf("Can't get %s cs gpios: %d\n", dev->name, ret); + return -EINVAL; + } + + return 0; +} + +static const struct dm_spi_ops qup_spi_ops = { + .claim_bus = qup_spi_claim_bus, + .release_bus = qup_spi_release_bus, + .xfer = qup_spi_xfer, + .set_speed = qup_spi_set_speed, + .set_mode = qup_spi_set_mode, + /* + * cs_info is not needed, since we require all chip selects to be + * in the device tree explicitly + */ +}; + +static const struct udevice_id qup_spi_ids[] = { + { .compatible = 
"qcom,spi-qup-v1.1.1", }, + { .compatible = "qcom,spi-qup-v2.1.1", }, + { .compatible = "qcom,spi-qup-v2.2.1", }, + { } +}; + +U_BOOT_DRIVER(spi_qup) = { + .name = "spi_qup", + .id = UCLASS_SPI, + .of_match = qup_spi_ids, + .ops = &qup_spi_ops, + .priv_auto_alloc_size = sizeof(struct qup_spi_priv), + .probe = qup_spi_probe, +}; diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 348630faf38..47a5571aecd 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -214,7 +214,7 @@ static void xilinx_spi_startup_block(struct udevice *dev, unsigned int bytes, struct dm_spi_slave_platdata *slave_plat = dev_get_parent_platdata(dev); const unsigned char *txp = dout; unsigned char *rxp = din; - u32 reg, count; + u32 reg; u32 txbytes = bytes; u32 rxbytes = bytes; @@ -224,10 +224,10 @@ static void xilinx_spi_startup_block(struct udevice *dev, unsigned int bytes, * it sets txp to the initial value for the normal operation. */ for ( ; priv->startup < 2; priv->startup++) { - count = xilinx_spi_fill_txfifo(bus, txp, txbytes); + xilinx_spi_fill_txfifo(bus, txp, txbytes); reg = readl(®s->spicr) & ~SPICR_MASTER_INHIBIT; writel(reg, ®s->spicr); - count = xilinx_spi_read_rxfifo(bus, rxp, rxbytes); + xilinx_spi_read_rxfifo(bus, rxp, rxbytes); txp = din; if (priv->startup) { @@ -251,7 +251,7 @@ static int xilinx_spi_xfer(struct udevice *dev, unsigned int bitlen, unsigned char *rxp = din; u32 txbytes = bytes; u32 rxbytes = bytes; - u32 reg, count, timeout; + u32 reg, count; int ret; debug("spi_xfer: bus:%i cs:%i bitlen:%i bytes:%i flags:%lx\n", diff --git a/drivers/spi/zynq_qspi.c b/drivers/spi/zynq_qspi.c index 3f39ef05f2d..f2eddec950a 100644 --- a/drivers/spi/zynq_qspi.c +++ b/drivers/spi/zynq_qspi.c @@ -6,8 +6,10 @@ * Xilinx Zynq Quad-SPI(QSPI) controller driver (master mode only) */ +#include <clk.h> #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <log.h> #include <malloc.h> #include <spi.h> @@ -105,17 +107,29 @@ static int zynq_qspi_ofdata_to_platdata(struct udevice *bus) plat->regs = (struct zynq_qspi_regs *)fdtdec_get_addr(blob, node, "reg"); - /* FIXME: Use 166MHz as a suitable default */ - plat->frequency = fdtdec_get_int(blob, node, "spi-max-frequency", - 166666666); - plat->speed_hz = plat->frequency / 2; - - debug("%s: regs=%p max-frequency=%d\n", __func__, - plat->regs, plat->frequency); - return 0; } +/** + * zynq_qspi_init_hw - Initialize the hardware + * @priv: Pointer to the zynq_qspi_priv structure + * + * The default settings of the QSPI controller's configurable parameters on + * reset are + * - Master mode + * - Baud rate divisor is set to 2 + * - Threshold value for TX FIFO not full interrupt is set to 1 + * - Flash memory interface mode enabled + * - Size of the word to be transferred as 8 bit + * This function performs the following actions + * - Disable and clear all the interrupts + * - Enable manual slave select + * - Enable auto start + * - Deselect all the chip select lines + * - Set the size of the word to be transferred as 32 bit + * - Set the little endian mode of TX FIFO and + * - Enable the QSPI controller + */ static void zynq_qspi_init_hw(struct zynq_qspi_priv *priv) { struct zynq_qspi_regs *regs = priv->regs; @@ -159,19 +173,45 @@ static int zynq_qspi_probe(struct udevice *bus) { struct zynq_qspi_platdata *plat = dev_get_platdata(bus); struct zynq_qspi_priv *priv = dev_get_priv(bus); + struct clk clk; + unsigned long clock; + int ret; priv->regs = plat->regs; priv->fifo_depth = ZYNQ_QSPI_FIFO_DEPTH; + ret = 
clk_get_by_name(bus, "ref_clk", &clk); + if (ret < 0) { + dev_err(bus, "failed to get clock\n"); + return ret; + } + + clock = clk_get_rate(&clk); + if (IS_ERR_VALUE(clock)) { + dev_err(bus, "failed to get rate\n"); + return clock; + } + + ret = clk_enable(&clk); + if (ret && ret != -ENOSYS) { + dev_err(bus, "failed to enable clock\n"); + return ret; + } + /* init the zynq spi hw */ zynq_qspi_init_hw(priv); + plat->frequency = clock; + plat->speed_hz = plat->frequency / 2; + + debug("%s: max-frequency=%d\n", __func__, plat->speed_hz); + return 0; } -/* +/** * zynq_qspi_read_data - Copy data to RX buffer - * @zqspi: Pointer to the zynq_qspi structure + * @priv: Pointer to the zynq_qspi_priv structure * @data: The 32 bit variable where data is stored * @size: Number of bytes to be copied from data to RX buffer */ @@ -214,9 +254,9 @@ static void zynq_qspi_read_data(struct zynq_qspi_priv *priv, u32 data, u8 size) priv->bytes_to_receive = 0; } -/* +/** * zynq_qspi_write_data - Copy data from TX buffer - * @zqspi: Pointer to the zynq_qspi structure + * @priv: Pointer to the zynq_qspi_priv structure * @data: Pointer to the 32 bit variable where data is to be copied * @size: Number of bytes to be copied from TX buffer to data */ @@ -263,6 +303,11 @@ static void zynq_qspi_write_data(struct zynq_qspi_priv *priv, priv->bytes_to_transfer = 0; } +/** + * zynq_qspi_chipselect - Select or deselect the chip select line + * @priv: Pointer to the zynq_qspi_priv structure + * @is_on: Select(1) or deselect (0) the chip select line + */ static void zynq_qspi_chipselect(struct zynq_qspi_priv *priv, int is_on) { u32 confr; @@ -282,9 +327,10 @@ static void zynq_qspi_chipselect(struct zynq_qspi_priv *priv, int is_on) writel(confr, ®s->cr); } -/* +/** * zynq_qspi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible - * @zqspi: Pointer to the zynq_qspi structure + * @priv: Pointer to the zynq_qspi_priv structure + * @size: Number of bytes to be copied to fifo */ static void zynq_qspi_fill_tx_fifo(struct zynq_qspi_priv *priv, u32 size) { @@ -322,9 +368,9 @@ static void zynq_qspi_fill_tx_fifo(struct zynq_qspi_priv *priv, u32 size) } } -/* +/** * zynq_qspi_irq_poll - Interrupt service routine of the QSPI controller - * @zqspi: Pointer to the zynq_qspi structure + * @priv: Pointer to the zynq_qspi structure * * This function handles TX empty and Mode Fault interrupts only. * On TX empty interrupt this function reads the received data from RX FIFO and @@ -410,11 +456,9 @@ static int zynq_qspi_irq_poll(struct zynq_qspi_priv *priv) return 0; } -/* +/** * zynq_qspi_start_transfer - Initiates the QSPI transfer - * @qspi: Pointer to the spi_device structure - * @transfer: Pointer to the spi_transfer structure which provide information - * about next transfer parameters + * @priv: Pointer to the zynq_qspi_priv structure * * This function fills the TX FIFO, starts the QSPI transfer, and waits for the * transfer to be completed. 
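The zynq_qspi.c probe above and the zynq_spi.c probe below share the same DM clock idiom; condensed into a single sketch (the "ref_clk" name comes from the Zynq bindings, everything else is generic boilerplate):

#include <clk.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <linux/err.h>

/*
 * Look the clock up by name, take the bus frequency from its rate, and
 * tolerate -ENOSYS from clk_enable() on platforms whose clock driver
 * implements no enable op.
 */
static int enable_ref_clk(struct udevice *bus, unsigned long *ratep)
{
	struct clk clk;
	unsigned long rate;
	int ret;

	ret = clk_get_by_name(bus, "ref_clk", &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	rate = clk_get_rate(&clk);
	if (IS_ERR_VALUE(rate)) {
		dev_err(bus, "failed to get rate\n");
		return rate;
	}

	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}

	*ratep = rate;
	return 0;
}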
diff --git a/drivers/spi/zynq_spi.c b/drivers/spi/zynq_spi.c index 9923931e36e..cb911c34f68 100644 --- a/drivers/spi/zynq_spi.c +++ b/drivers/spi/zynq_spi.c @@ -8,10 +8,12 @@ #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <log.h> #include <malloc.h> #include <spi.h> #include <time.h> +#include <clk.h> #include <asm/io.h> #include <linux/bitops.h> #include <linux/delay.h> @@ -79,17 +81,10 @@ static int zynq_spi_ofdata_to_platdata(struct udevice *bus) plat->regs = dev_read_addr_ptr(bus); - /* FIXME: Use 250MHz as a suitable default */ - plat->frequency = fdtdec_get_int(blob, node, "spi-max-frequency", - 250000000); plat->deactivate_delay_us = fdtdec_get_int(blob, node, "spi-deactivate-delay", 0); plat->activate_delay_us = fdtdec_get_int(blob, node, "spi-activate-delay", 0); - plat->speed_hz = plat->frequency / 2; - - debug("%s: regs=%p max-frequency=%d\n", __func__, - plat->regs, plat->frequency); return 0; } @@ -128,13 +123,39 @@ static int zynq_spi_probe(struct udevice *bus) { struct zynq_spi_platdata *plat = dev_get_platdata(bus); struct zynq_spi_priv *priv = dev_get_priv(bus); + struct clk clk; + unsigned long clock; + int ret; priv->regs = plat->regs; priv->fifo_depth = ZYNQ_SPI_FIFO_DEPTH; + ret = clk_get_by_name(bus, "ref_clk", &clk); + if (ret < 0) { + dev_err(bus, "failed to get clock\n"); + return ret; + } + + clock = clk_get_rate(&clk); + if (IS_ERR_VALUE(clock)) { + dev_err(bus, "failed to get rate\n"); + return clock; + } + + ret = clk_enable(&clk); + if (ret && ret != -ENOSYS) { + dev_err(bus, "failed to enable clock\n"); + return ret; + } + /* init the zynq spi hw */ zynq_spi_init_hw(priv); + plat->frequency = clock; + plat->speed_hz = plat->frequency / 2; + + debug("%s: max-frequency=%d\n", __func__, plat->speed_hz); + return 0; } diff --git a/drivers/sysreset/Kconfig b/drivers/sysreset/Kconfig index 6ebc90e1d33..70692f07e7f 100644 --- a/drivers/sysreset/Kconfig +++ b/drivers/sysreset/Kconfig @@ -79,12 +79,12 @@ config SYSRESET_SOCFPGA This enables the system reset driver support for Intel SOCFPGA SoCs (Cyclone 5, Arria 5 and Arria 10). -config SYSRESET_SOCFPGA_S10 - bool "Enable support for Intel SOCFPGA Stratix 10" - depends on ARCH_SOCFPGA && TARGET_SOCFPGA_STRATIX10 +config SYSRESET_SOCFPGA_SOC64 + bool "Enable support for Intel SOCFPGA SoC64 family (Stratix10/Agilex)" + depends on ARCH_SOCFPGA && (TARGET_SOCFPGA_STRATIX10 || TARGET_SOCFPGA_AGILEX) help This enables the system reset driver support for Intel SOCFPGA - Stratix SoCs. + SoC64 SoCs. 
config SYSRESET_TI_SCI bool "TI System Control Interface (TI SCI) system reset driver" diff --git a/drivers/sysreset/Makefile b/drivers/sysreset/Makefile index df2293b8489..920c69233f7 100644 --- a/drivers/sysreset/Makefile +++ b/drivers/sysreset/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_SYSRESET_MICROBLAZE) += sysreset_microblaze.o obj-$(CONFIG_SYSRESET_OCTEON) += sysreset_octeon.o obj-$(CONFIG_SYSRESET_PSCI) += sysreset_psci.o obj-$(CONFIG_SYSRESET_SOCFPGA) += sysreset_socfpga.o -obj-$(CONFIG_SYSRESET_SOCFPGA_S10) += sysreset_socfpga_s10.o +obj-$(CONFIG_SYSRESET_SOCFPGA_SOC64) += sysreset_socfpga_soc64.o obj-$(CONFIG_SYSRESET_TI_SCI) += sysreset-ti-sci.o obj-$(CONFIG_SYSRESET_SYSCON) += sysreset_syscon.o obj-$(CONFIG_SYSRESET_WATCHDOG) += sysreset_watchdog.o diff --git a/drivers/sysreset/sysreset_mpc83xx.c b/drivers/sysreset/sysreset_mpc83xx.c index 456f006bc12..4e89971840f 100644 --- a/drivers/sysreset/sysreset_mpc83xx.c +++ b/drivers/sysreset/sysreset_mpc83xx.c @@ -106,7 +106,7 @@ static int print_83xx_arb_event(bool force, char *buf, int size) if (!force && !gd->arch.arbiter_event_address) return 0; - if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_FULL)) { + if (CONFIG_IS_ENABLED(DISPLAY_AER_FULL)) { res = snprintf(buf, size, "Arbiter Event Status:\n" " %s: 0x%08lX\n" @@ -119,7 +119,7 @@ static int print_83xx_arb_event(bool force, char *buf, int size) "Master ID", mstr_id, master[mstr_id], "Transfer Size", tsize_val, tsize_bytes, "Transfer Type", ttype, transfer[ttype]); - } else if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_BRIEF)) { + } else if (CONFIG_IS_ENABLED(DISPLAY_AER_BRIEF)) { res = snprintf(buf, size, "Arbiter Event Status: AEATR=0x%08lX, AEADR=0x%08lX\n", gd->arch.arbiter_event_attributes, @@ -183,8 +183,8 @@ static int mpc83xx_sysreset_get_status(struct udevice *dev, char *buf, int size) * TODO(mario.six@gdsys.cc): Move this into a dedicated * arbiter driver */ - if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_FULL) || - CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_BRIEF)) { + if (CONFIG_IS_ENABLED(DISPLAY_AER_FULL) || + CONFIG_IS_ENABLED(DISPLAY_AER_BRIEF)) { /* * If there was a bus monitor reset event, we force the arbiter * event to be printed diff --git a/drivers/sysreset/sysreset_sandbox.c b/drivers/sysreset/sysreset_sandbox.c index 69c22a70008..71cabd19568 100644 --- a/drivers/sysreset/sysreset_sandbox.c +++ b/drivers/sysreset/sysreset_sandbox.c @@ -130,7 +130,9 @@ U_BOOT_DRIVER(warm_sysreset_sandbox) = { .ops = &sandbox_warm_sysreset_ops, }; +#if !CONFIG_IS_ENABLED(OF_PLATDATA) /* This is here in case we don't have a device tree */ U_BOOT_DEVICE(sysreset_sandbox_non_fdt) = { .name = "sysreset_sandbox", }; +#endif diff --git a/drivers/sysreset/sysreset_socfpga_s10.c b/drivers/sysreset/sysreset_socfpga_soc64.c index 9837aadf64b..9837aadf64b 100644 --- a/drivers/sysreset/sysreset_socfpga_s10.c +++ b/drivers/sysreset/sysreset_socfpga_soc64.c diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 5260dab3ac0..66ade37cd40 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -5,6 +5,7 @@ #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <log.h> #include <malloc.h> #include <tee.h> diff --git a/drivers/timer/Kconfig b/drivers/timer/Kconfig index f8fa4aa71f4..80743a25519 100644 --- a/drivers/timer/Kconfig +++ b/drivers/timer/Kconfig @@ -53,6 +53,13 @@ config ALTERA_TIMER Select this to enable a timer for Altera devices. Please find details on the "Embedded Peripherals IP User Guide" of Altera. 
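The sysreset_mpc83xx.c hunks above fix a classic CONFIG_IS_ENABLED() mistake: the macro already prepends CONFIG_ (or CONFIG_SPL_ in SPL builds) to its argument, so passing the fully prefixed symbol makes it test the nonexistent CONFIG_CONFIG_DISPLAY_AER_FULL and the guarded code silently compiles out of every build. A short sketch of the rule; print_full_report() is a hypothetical stand-in:

/* Kconfig provides CONFIG_DISPLAY_AER_FULL=y */

if (CONFIG_IS_ENABLED(DISPLAY_AER_FULL))	/* correct: checks CONFIG_DISPLAY_AER_FULL */
	print_full_report();

if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_FULL))	/* wrong: checks CONFIG_CONFIG_..., never set */
	print_full_report();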
+config ANDES_PLMT_TIMER + bool + depends on RISCV_MMODE || SPL_RISCV_MMODE + help + The Andes PLMT block holds the memory-mapped mtime register + associated with the timer tick. + config ARC_TIMER bool "ARC timer support" depends on TIMER && ARC && CLK diff --git a/drivers/timer/Makefile b/drivers/timer/Makefile index 3a4d74b996b..eb5c48cc6ce 100644 --- a/drivers/timer/Makefile +++ b/drivers/timer/Makefile @@ -5,6 +5,7 @@ obj-y += timer-uclass.o obj-$(CONFIG_AG101P_TIMER) += ag101p_timer.o obj-$(CONFIG_ALTERA_TIMER) += altera_timer.o +obj-$(CONFIG_ANDES_PLMT_TIMER) += andes_plmt_timer.o obj-$(CONFIG_ARC_TIMER) += arc_timer.o obj-$(CONFIG_AST_TIMER) += ast_timer.o obj-$(CONFIG_ATCPIT100_TIMER) += atcpit100_timer.o @@ -18,6 +19,7 @@ obj-$(CONFIG_RENESAS_OSTM_TIMER) += ostm_timer.o obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o obj-$(CONFIG_SANDBOX_TIMER) += sandbox_timer.o +obj-$(CONFIG_SIFIVE_CLINT) += sifive_clint_timer.o obj-$(CONFIG_STI_TIMER) += sti-timer.o obj-$(CONFIG_STM32_TIMER) += stm32_timer.o obj-$(CONFIG_X86_TSC_TIMER) += tsc_timer.o diff --git a/drivers/timer/ag101p_timer.c b/drivers/timer/ag101p_timer.c index c011906b938..23ad5b2b67a 100644 --- a/drivers/timer/ag101p_timer.c +++ b/drivers/timer/ag101p_timer.c @@ -62,14 +62,13 @@ struct atftmr_timer_platdata { struct atftmr_timer_regs *regs; }; -static int atftmr_timer_get_count(struct udevice *dev, u64 *count) +static u64 atftmr_timer_get_count(struct udevice *dev) { struct atftmr_timer_platdata *plat = dev->platdata; struct atftmr_timer_regs *const regs = plat->regs; u32 val; val = readl(&regs->t3_counter); - *count = timer_conv_64(val); - return 0; + return timer_conv_64(val); } static int atftmr_timer_probe(struct udevice *dev) diff --git a/drivers/timer/altera_timer.c b/drivers/timer/altera_timer.c index 6cb2923e0b6..ccc164ee176 100644 --- a/drivers/timer/altera_timer.c +++ b/drivers/timer/altera_timer.c @@ -32,7 +32,7 @@ struct altera_timer_platdata { struct altera_timer_regs *regs; }; -static int altera_timer_get_count(struct udevice *dev, u64 *count) +static u64 altera_timer_get_count(struct udevice *dev) { struct altera_timer_platdata *plat = dev->platdata; struct altera_timer_regs *const regs = plat->regs; @@ -44,9 +44,7 @@ static int altera_timer_get_count(struct udevice *dev, u64 *count) /* Read timer value */ val = readl(&regs->snapl) & 0xffff; val |= (readl(&regs->snaph) & 0xffff) << 16; - *count = timer_conv_64(~val); - - return 0; + return timer_conv_64(~val); } static int altera_timer_probe(struct udevice *dev) diff --git a/drivers/timer/andes_plmt_timer.c b/drivers/timer/andes_plmt_timer.c new file mode 100644 index 00000000000..cec86718c7f --- /dev/null +++ b/drivers/timer/andes_plmt_timer.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019, Rick Chen <rick@andestech.com> + * Copyright (C) 2020, Sean Anderson <seanga2@gmail.com> + * + * U-Boot syscon driver for Andes's Platform Level Machine Timer (PLMT). + * The PLMT block holds the memory-mapped mtime register + * associated with the timer tick.
+ */ + +#include <common.h> +#include <dm.h> +#include <timer.h> +#include <asm/io.h> +#include <linux/err.h> + +/* mtime register */ +#define MTIME_REG(base) ((ulong)(base)) + +static u64 andes_plmt_get_count(struct udevice *dev) +{ + return readq((void __iomem *)MTIME_REG(dev->priv)); +} + +static const struct timer_ops andes_plmt_ops = { + .get_count = andes_plmt_get_count, +}; + +static int andes_plmt_probe(struct udevice *dev) +{ + dev->priv = dev_read_addr_ptr(dev); + if (!dev->priv) + return -EINVAL; + + return timer_timebase_fallback(dev); +} + +static const struct udevice_id andes_plmt_ids[] = { + { .compatible = "riscv,plmt0" }, + { } +}; + +U_BOOT_DRIVER(andes_plmt) = { + .name = "andes_plmt", + .id = UCLASS_TIMER, + .of_match = andes_plmt_ids, + .ops = &andes_plmt_ops, + .probe = andes_plmt_probe, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/timer/arc_timer.c b/drivers/timer/arc_timer.c index 8c574ec5af1..2dea9f40cba 100644 --- a/drivers/timer/arc_timer.c +++ b/drivers/timer/arc_timer.c @@ -26,7 +26,7 @@ struct arc_timer_priv { uint timer_id; }; -static int arc_timer_get_count(struct udevice *dev, u64 *count) +static u64 arc_timer_get_count(struct udevice *dev) { u32 val = 0; struct arc_timer_priv *priv = dev_get_priv(dev); @@ -39,9 +39,7 @@ static int arc_timer_get_count(struct udevice *dev, u64 *count) val = read_aux_reg(ARC_AUX_TIMER1_CNT); break; } - *count = timer_conv_64(val); - - return 0; + return timer_conv_64(val); } static int arc_timer_probe(struct udevice *dev) diff --git a/drivers/timer/ast_timer.c b/drivers/timer/ast_timer.c index e3132497404..35369a4087f 100644 --- a/drivers/timer/ast_timer.c +++ b/drivers/timer/ast_timer.c @@ -51,13 +51,11 @@ static int ast_timer_probe(struct udevice *dev) return 0; } -static int ast_timer_get_count(struct udevice *dev, u64 *count) +static u64 ast_timer_get_count(struct udevice *dev) { struct ast_timer_priv *priv = dev_get_priv(dev); - *count = AST_TMC_RELOAD_VAL - readl(&priv->tmc->status); - - return 0; + return AST_TMC_RELOAD_VAL - readl(&priv->tmc->status); } static int ast_timer_ofdata_to_platdata(struct udevice *dev) diff --git a/drivers/timer/atcpit100_timer.c b/drivers/timer/atcpit100_timer.c index 5d4ae685092..fcb8a453581 100644 --- a/drivers/timer/atcpit100_timer.c +++ b/drivers/timer/atcpit100_timer.c @@ -68,13 +68,12 @@ struct atcpit_timer_platdata { u32 *regs; }; -static int atcpit_timer_get_count(struct udevice *dev, u64 *count) +static u64 atcpit_timer_get_count(struct udevice *dev) { struct atcpit_timer_platdata *plat = dev_get_platdata(dev); u32 val; val = ~(REG32_TMR(CH_CNT(1))+0xffffffff); - *count = timer_conv_64(val); - return 0; + return timer_conv_64(val); } static int atcpit_timer_probe(struct udevice *dev) diff --git a/drivers/timer/atmel_pit_timer.c b/drivers/timer/atmel_pit_timer.c index 843d670b5e2..9f0ad1d703f 100644 --- a/drivers/timer/atmel_pit_timer.c +++ b/drivers/timer/atmel_pit_timer.c @@ -25,15 +25,13 @@ struct atmel_pit_platdata { struct atmel_pit_regs *regs; }; -static int atmel_pit_get_count(struct udevice *dev, u64 *count) +static u64 atmel_pit_get_count(struct udevice *dev) { struct atmel_pit_platdata *plat = dev_get_platdata(dev); struct atmel_pit_regs *const regs = plat->regs; u32 val = readl(&regs->value_image); - *count = timer_conv_64(val); - - return 0; + return timer_conv_64(val); } static int atmel_pit_probe(struct udevice *dev) diff --git a/drivers/timer/cadence-ttc.c b/drivers/timer/cadence-ttc.c index e6b6dfe3765..bebb2c2e904 100644 --- a/drivers/timer/cadence-ttc.c +++ 
b/drivers/timer/cadence-ttc.c @@ -57,13 +57,11 @@ ulong timer_get_boot_us(void) } #endif -static int cadence_ttc_get_count(struct udevice *dev, u64 *count) +static u64 cadence_ttc_get_count(struct udevice *dev) { struct cadence_ttc_priv *priv = dev_get_priv(dev); - *count = readl(&priv->regs->counter_val1); - - return 0; + return readl(&priv->regs->counter_val1); } static int cadence_ttc_probe(struct udevice *dev) diff --git a/drivers/timer/dw-apb-timer.c b/drivers/timer/dw-apb-timer.c index 35271b20c89..68bc258131b 100644 --- a/drivers/timer/dw-apb-timer.c +++ b/drivers/timer/dw-apb-timer.c @@ -25,7 +25,7 @@ struct dw_apb_timer_priv { struct reset_ctl_bulk resets; }; -static int dw_apb_timer_get_count(struct udevice *dev, u64 *count) +static u64 dw_apb_timer_get_count(struct udevice *dev) { struct dw_apb_timer_priv *priv = dev_get_priv(dev); @@ -34,9 +34,7 @@ static int dw_apb_timer_get_count(struct udevice *dev, u64 *count) * requires the count to be incrementing. Invert the * result. */ - *count = timer_conv_64(~readl(priv->regs + DW_APB_CURR_VAL)); - - return 0; + return timer_conv_64(~readl(priv->regs + DW_APB_CURR_VAL)); } static int dw_apb_timer_probe(struct udevice *dev) diff --git a/drivers/timer/mchp-pit64b-timer.c b/drivers/timer/mchp-pit64b-timer.c index ead8c9b84ad..ad962098b3d 100644 --- a/drivers/timer/mchp-pit64b-timer.c +++ b/drivers/timer/mchp-pit64b-timer.c @@ -27,16 +27,14 @@ struct mchp_pit64b_priv { void __iomem *base; }; -static int mchp_pit64b_get_count(struct udevice *dev, u64 *count) +static u64 mchp_pit64b_get_count(struct udevice *dev) { struct mchp_pit64b_priv *priv = dev_get_priv(dev); u32 lsb = readl(priv->base + MCHP_PIT64B_TLSBR); u32 msb = readl(priv->base + MCHP_PIT64B_TMSBR); - *count = ((u64)msb << 32) | lsb; - - return 0; + return ((u64)msb << 32) | lsb; } static int mchp_pit64b_probe(struct udevice *dev) diff --git a/drivers/timer/mpc83xx_timer.c b/drivers/timer/mpc83xx_timer.c index ad8bb28e8b3..ba7704225a3 100644 --- a/drivers/timer/mpc83xx_timer.c +++ b/drivers/timer/mpc83xx_timer.c @@ -187,7 +187,7 @@ void wait_ticks(ulong ticks) WATCHDOG_RESET(); } -static int mpc83xx_timer_get_count(struct udevice *dev, u64 *count) +static u64 mpc83xx_timer_get_count(struct udevice *dev) { u32 tbu, tbl; @@ -201,9 +201,7 @@ static int mpc83xx_timer_get_count(struct udevice *dev, u64 *count) tbl = mftb(); } while (tbu != mftbu()); - *count = (tbu * 0x10000ULL) + tbl; - - return 0; + return (tbu * 0x10000ULL) + tbl; } static int mpc83xx_timer_probe(struct udevice *dev) diff --git a/drivers/timer/mtk_timer.c b/drivers/timer/mtk_timer.c index 69ed521811d..74e9ea34ffa 100644 --- a/drivers/timer/mtk_timer.c +++ b/drivers/timer/mtk_timer.c @@ -27,14 +27,12 @@ struct mtk_timer_priv { void __iomem *base; }; -static int mtk_timer_get_count(struct udevice *dev, u64 *count) +static u64 mtk_timer_get_count(struct udevice *dev) { struct mtk_timer_priv *priv = dev_get_priv(dev); u32 val = readl(priv->base + MTK_GPT4_CNT); - *count = timer_conv_64(val); - - return 0; + return timer_conv_64(val); } static int mtk_timer_probe(struct udevice *dev) diff --git a/drivers/timer/nomadik-mtu-timer.c b/drivers/timer/nomadik-mtu-timer.c index 7ff921385a3..d7f7ca4effd 100644 --- a/drivers/timer/nomadik-mtu-timer.c +++ b/drivers/timer/nomadik-mtu-timer.c @@ -54,14 +54,12 @@ struct nomadik_mtu_priv { struct nomadik_mtu_timer_regs *timer; }; -static int nomadik_mtu_get_count(struct udevice *dev, u64 *count) +static u64 nomadik_mtu_get_count(struct udevice *dev) { struct nomadik_mtu_priv *priv 
= dev_get_priv(dev); /* Decrementing counter: invert the value */ - *count = timer_conv_64(~readl(&priv->timer->cv)); - - return 0; + return timer_conv_64(~readl(&priv->timer->cv)); } static int nomadik_mtu_probe(struct udevice *dev) diff --git a/drivers/timer/omap-timer.c b/drivers/timer/omap-timer.c index cf3d27b96bc..4eecb3e64d2 100644 --- a/drivers/timer/omap-timer.c +++ b/drivers/timer/omap-timer.c @@ -48,13 +48,11 @@ struct omap_timer_priv { struct omap_gptimer_regs *regs; }; -static int omap_timer_get_count(struct udevice *dev, u64 *count) +static u64 omap_timer_get_count(struct udevice *dev) { struct omap_timer_priv *priv = dev_get_priv(dev); - *count = timer_conv_64(readl(&priv->regs->tcrr)); - - return 0; + return timer_conv_64(readl(&priv->regs->tcrr)); } static int omap_timer_probe(struct udevice *dev) diff --git a/drivers/timer/ostm_timer.c b/drivers/timer/ostm_timer.c index bea97159ebe..bb0636a0719 100644 --- a/drivers/timer/ostm_timer.c +++ b/drivers/timer/ostm_timer.c @@ -27,13 +27,11 @@ struct ostm_priv { fdt_addr_t regs; }; -static int ostm_get_count(struct udevice *dev, u64 *count) +static u64 ostm_get_count(struct udevice *dev) { struct ostm_priv *priv = dev_get_priv(dev); - *count = timer_conv_64(readl(priv->regs + OSTM_CNT)); - - return 0; + return timer_conv_64(readl(priv->regs + OSTM_CNT)); } static int ostm_probe(struct udevice *dev) diff --git a/drivers/timer/riscv_timer.c b/drivers/timer/riscv_timer.c index 449fcfcfd59..21ae1840571 100644 --- a/drivers/timer/riscv_timer.c +++ b/drivers/timer/riscv_timer.c @@ -16,22 +16,19 @@ #include <timer.h> #include <asm/csr.h> -static int riscv_timer_get_count(struct udevice *dev, u64 *count) +static u64 riscv_timer_get_count(struct udevice *dev) { - if (IS_ENABLED(CONFIG_64BIT)) { - *count = csr_read(CSR_TIME); - } else { - u32 hi, lo; + __maybe_unused u32 hi, lo; - do { - hi = csr_read(CSR_TIMEH); - lo = csr_read(CSR_TIME); - } while (hi != csr_read(CSR_TIMEH)); + if (IS_ENABLED(CONFIG_64BIT)) + return csr_read(CSR_TIME); - *count = ((u64)hi << 32) | lo; - } + do { + hi = csr_read(CSR_TIMEH); + lo = csr_read(CSR_TIME); + } while (hi != csr_read(CSR_TIMEH)); - return 0; + return ((u64)hi << 32) | lo; } static int riscv_timer_probe(struct udevice *dev) diff --git a/drivers/timer/rockchip_timer.c b/drivers/timer/rockchip_timer.c index 7a5a4842527..53cdf09810d 100644 --- a/drivers/timer/rockchip_timer.c +++ b/drivers/timer/rockchip_timer.c @@ -88,14 +88,13 @@ ulong timer_get_boot_us(void) } #endif -static int rockchip_timer_get_count(struct udevice *dev, u64 *count) +static u64 rockchip_timer_get_count(struct udevice *dev) { struct rockchip_timer_priv *priv = dev_get_priv(dev); uint64_t cntr = rockchip_timer_get_curr_value(priv->timer); /* timers are down-counting */ - *count = ~0ull - cntr; - return 0; + return ~0ull - cntr; } static int rockchip_clk_ofdata_to_platdata(struct udevice *dev) diff --git a/drivers/timer/sandbox_timer.c b/drivers/timer/sandbox_timer.c index 6a503c2f153..135c0f38a4d 100644 --- a/drivers/timer/sandbox_timer.c +++ b/drivers/timer/sandbox_timer.c @@ -29,11 +29,9 @@ unsigned long notrace timer_early_get_rate(void) return SANDBOX_TIMER_RATE; } -static notrace int sandbox_timer_get_count(struct udevice *dev, u64 *count) +static notrace u64 sandbox_timer_get_count(struct udevice *dev) { - *count = timer_early_get_count(); - - return 0; + return timer_early_get_count(); } static int sandbox_timer_probe(struct udevice *dev) diff --git a/drivers/timer/sifive_clint_timer.c b/drivers/timer/sifive_clint_timer.c 
new file mode 100644 index 00000000000..00ce0f08d6e --- /dev/null +++ b/drivers/timer/sifive_clint_timer.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020, Sean Anderson <seanga2@gmail.com> + * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com> + */ + +#include <common.h> +#include <clk.h> +#include <dm.h> +#include <timer.h> +#include <asm/io.h> +#include <linux/err.h> + +/* mtime register */ +#define MTIME_REG(base) ((ulong)(base) + 0xbff8) + +static u64 sifive_clint_get_count(struct udevice *dev) +{ + return readq((void __iomem *)MTIME_REG(dev->priv)); +} + +static const struct timer_ops sifive_clint_ops = { + .get_count = sifive_clint_get_count, +}; + +static int sifive_clint_probe(struct udevice *dev) +{ + dev->priv = dev_read_addr_ptr(dev); + if (!dev->priv) + return -EINVAL; + + return timer_timebase_fallback(dev); +} + +static const struct udevice_id sifive_clint_ids[] = { + { .compatible = "riscv,clint0" }, + { } +}; + +U_BOOT_DRIVER(sifive_clint) = { + .name = "sifive_clint", + .id = UCLASS_TIMER, + .of_match = sifive_clint_ids, + .probe = sifive_clint_probe, + .ops = &sifive_clint_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/timer/sti-timer.c b/drivers/timer/sti-timer.c index ff42056abdd..e6843ebb337 100644 --- a/drivers/timer/sti-timer.c +++ b/drivers/timer/sti-timer.c @@ -17,7 +17,7 @@ struct sti_timer_priv { struct globaltimer *global_timer; }; -static int sti_timer_get_count(struct udevice *dev, u64 *count) +static u64 sti_timer_get_count(struct udevice *dev) { struct sti_timer_priv *priv = dev_get_priv(dev); struct globaltimer *global_timer = priv->global_timer; @@ -34,9 +34,7 @@ static int sti_timer_get_count(struct udevice *dev, u64 *count) old = high; } timer = high; - *count = (u64)((timer << 32) | low); - - return 0; + return (u64)((timer << 32) | low); } static int sti_timer_probe(struct udevice *dev) diff --git a/drivers/timer/stm32_timer.c b/drivers/timer/stm32_timer.c index c57fa3f5570..f517d5e61f2 100644 --- a/drivers/timer/stm32_timer.c +++ b/drivers/timer/stm32_timer.c @@ -52,14 +52,12 @@ struct stm32_timer_priv { struct stm32_timer_regs *base; }; -static int stm32_timer_get_count(struct udevice *dev, u64 *count) +static u64 stm32_timer_get_count(struct udevice *dev) { struct stm32_timer_priv *priv = dev_get_priv(dev); struct stm32_timer_regs *regs = priv->base; - *count = readl(&regs->cnt); - - return 0; + return readl(&regs->cnt); } static int stm32_timer_probe(struct udevice *dev) diff --git a/drivers/timer/timer-uclass.c b/drivers/timer/timer-uclass.c index e9802c8b43e..62d0e860e80 100644 --- a/drivers/timer/timer-uclass.c +++ b/drivers/timer/timer-uclass.c @@ -4,14 +4,15 @@ */ #include <common.h> +#include <clk.h> #include <cpu.h> #include <dm.h> -#include <init.h> #include <dm/lists.h> +#include <dm/device_compat.h> #include <dm/device-internal.h> #include <dm/root.h> -#include <clk.h> #include <errno.h> +#include <init.h> #include <timer.h> #include <linux/err.h> @@ -33,7 +34,8 @@ int notrace timer_get_count(struct udevice *dev, u64 *count) if (!ops->get_count) return -ENOSYS; - return ops->get_count(dev, count); + *count = ops->get_count(dev); + return 0; } unsigned long notrace timer_get_rate(struct udevice *dev) diff --git a/drivers/timer/tsc_timer.c b/drivers/timer/tsc_timer.c index 93c959ff444..abc0a1da05e 100644 --- a/drivers/timer/tsc_timer.c +++ b/drivers/timer/tsc_timer.c @@ -386,13 +386,11 @@ void __udelay(unsigned long usec) #endif } -static int tsc_timer_get_count(struct udevice *dev, u64 *count) 
+static u64 tsc_timer_get_count(struct udevice *dev) { u64 now_tick = rdtsc(); - *count = now_tick - gd->arch.tsc_base; - - return 0; + return now_tick - gd->arch.tsc_base; } static void tsc_timer_ensure_setup(bool early) diff --git a/drivers/tpm/cr50_i2c.c b/drivers/tpm/cr50_i2c.c index 64831a42232..a761e3d52f1 100644 --- a/drivers/tpm/cr50_i2c.c +++ b/drivers/tpm/cr50_i2c.c @@ -494,13 +494,13 @@ static int process_reset(struct udevice *dev) continue; } - log_warning("TPM ready after %ld ms\n", get_timer(start)); + log_debug("TPM ready after %ld ms\n", get_timer(start)); return 0; } while (get_timer(start) < TIMEOUT_INIT_MS); - log_warning("TPM failed to reset after %ld ms, status: %#x\n", - get_timer(start), access); + log_err("TPM failed to reset after %ld ms, status: %#x\n", + get_timer(start), access); return -EPERM; } @@ -539,7 +539,7 @@ static int claim_locality(struct udevice *dev, int loc) log_err("Failed to claim locality\n"); return -EPERM; } - log_info("Claimed locality %d\n", loc); + log_debug("Claimed locality %d\n", loc); priv->locality = loc; return 0; @@ -577,7 +577,7 @@ static int cr50_i2c_cleanup(struct udevice *dev) { struct cr50_priv *priv = dev_get_priv(dev); - printf("%s: cleanup %d\n", __func__, priv->locality); + log_debug("cleanup %d\n", priv->locality); if (priv->locality != -1) release_locality(dev, 1); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 34881a12b8b..fedc0134f5f 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -72,6 +72,8 @@ source "drivers/usb/cdns3/Kconfig" source "drivers/usb/dwc3/Kconfig" +source "drivers/usb/mtu3/Kconfig" + source "drivers/usb/musb/Kconfig" source "drivers/usb/musb-new/Kconfig" diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index d4ae18693c6..5e5c3c3e3dc 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -46,8 +46,16 @@ static const char *const speed_names[] = { [USB_SPEED_HIGH] = "high-speed", [USB_SPEED_WIRELESS] = "wireless", [USB_SPEED_SUPER] = "super-speed", + [USB_SPEED_SUPER_PLUS] = "super-speed-plus", }; +const char *usb_speed_string(enum usb_device_speed speed) +{ + if (speed < 0 || speed >= ARRAY_SIZE(speed_names)) + speed = USB_SPEED_UNKNOWN; + return speed_names[speed]; +} + enum usb_device_speed usb_get_maximum_speed(ofnode node) { const char *max_speed; diff --git a/drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c b/drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c index 1c0505eb28c..f17009a29e0 100644 --- a/drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c +++ b/drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c @@ -421,6 +421,9 @@ static void process_ep_out_intr(struct dwc2_udc *dev) { u32 ep_intr, ep_intr_status; u8 ep_num = 0; + u32 ep_tsr = 0, xfer_size = 0; + u32 *epsiz_reg = &reg->out_endp[ep_num].doeptsiz; + u32 req_size = sizeof(struct usb_ctrlrequest); ep_intr = readl(&reg->daint); debug_cond(DEBUG_OUT_EP != 0, @@ -441,10 +444,17 @@ static void process_ep_out_intr(struct dwc2_udc *dev) if (ep_num == 0) { if (ep_intr_status & TRANSFER_DONE) { - if (dev->ep0state != - WAIT_FOR_OUT_COMPLETE) + ep_tsr = readl(epsiz_reg); + xfer_size = ep_tsr & + DOEPT_SIZ_XFER_SIZE_MAX_EP0; + + if (xfer_size == req_size && + dev->ep0state == WAIT_FOR_SETUP) { + dwc2_udc_pre_setup(); + } else if (dev->ep0state != + WAIT_FOR_OUT_COMPLETE) { complete_rx(dev, ep_num); - else { + } else { dev->ep0state = WAIT_FOR_SETUP; dwc2_udc_pre_setup(); } diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 587204cfb7a..0cdf47c2dda 100644 --- 
a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -161,6 +161,12 @@ #define gadget_is_max3420(g) 0 #endif +#ifdef CONFIG_USB_MTU3_GADGET +#define gadget_is_mtu3(g) (!strcmp("mtu3-gadget", (g)->name)) +#else +#define gadget_is_mtu3(g) 0 +#endif + /** * usb_gadget_controller_number - support bcdDevice id convention * @gadget: the controller being driven @@ -224,5 +230,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x24; else if (gadget_is_max3420(gadget)) return 0x25; + else if (gadget_is_mtu3(gadget)) + return 0x26; return -ENOENT; } diff --git a/drivers/usb/host/dwc3-octeon-glue.c b/drivers/usb/host/dwc3-octeon-glue.c index 39b31856163..c3cac9c5abd 100644 --- a/drivers/usb/host/dwc3-octeon-glue.c +++ b/drivers/usb/host/dwc3-octeon-glue.c @@ -13,6 +13,7 @@ #include <errno.h> #include <usb.h> #include <asm/io.h> +#include <dm/device_compat.h> #include <dm/lists.h> #include <dm/of_access.h> #include <linux/bitfield.h> diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 0b496149956..b002d6f1664 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -279,10 +279,10 @@ static struct xhci_segment *xhci_segment_alloc(void) { struct xhci_segment *seg; - seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment)); + seg = malloc(sizeof(struct xhci_segment)); BUG_ON(!seg); - seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE); + seg->trbs = xhci_malloc(SEGMENT_SIZE); seg->next = NULL; @@ -309,7 +309,7 @@ struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs) struct xhci_ring *ring; struct xhci_segment *prev; - ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring)); + ring = malloc(sizeof(struct xhci_ring)); BUG_ON(!ring); if (num_segs == 0) @@ -425,8 +425,7 @@ static struct xhci_container_ctx { struct xhci_container_ctx *ctx; - ctx = (struct xhci_container_ctx *) - malloc(sizeof(struct xhci_container_ctx)); + ctx = malloc(sizeof(struct xhci_container_ctx)); BUG_ON(!ctx); BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); @@ -436,7 +435,7 @@ static struct xhci_container_ctx if (type == XHCI_CTX_TYPE_INPUT) ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)); - ctx->bytes = (u8 *)xhci_malloc(ctx->size); + ctx->bytes = xhci_malloc(ctx->size); return ctx; } @@ -458,8 +457,7 @@ int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id) return -EEXIST; } - ctrl->devs[slot_id] = (struct xhci_virt_device *) - malloc(sizeof(struct xhci_virt_device)); + ctrl->devs[slot_id] = malloc(sizeof(struct xhci_virt_device)); if (!ctrl->devs[slot_id]) { puts("Failed to allocate virtual device\n"); @@ -518,8 +516,7 @@ int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr, struct xhci_segment *seg; /* DCBAA initialization */ - ctrl->dcbaa = (struct xhci_device_context_array *) - xhci_malloc(sizeof(struct xhci_device_context_array)); + ctrl->dcbaa = xhci_malloc(sizeof(struct xhci_device_context_array)); if (ctrl->dcbaa == NULL) { puts("unable to allocate DCBA\n"); return -ENOMEM; @@ -555,8 +552,8 @@ int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr, /* Event ring does not maintain link TRB */ ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false); - ctrl->erst.entries = (struct xhci_erst_entry *) - xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS); + ctrl->erst.entries = xhci_malloc(sizeof(struct xhci_erst_entry) * + ERST_NUM_SEGS); ctrl->erst.num_entries = ERST_NUM_SEGS; diff --git 
a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index f3f181dae01..f62e232d218 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -7,15 +7,16 @@ #include <clk.h> #include <common.h> #include <dm.h> +#include <dm/device_compat.h> #include <dm/devres.h> #include <generic-phy.h> #include <malloc.h> +#include <power/regulator.h> #include <usb.h> +#include <usb/xhci.h> #include <linux/errno.h> #include <linux/compat.h> -#include <power/regulator.h> #include <linux/iopoll.h> -#include <usb/xhci.h> /* IPPC (IP Port Control) registers */ #define IPPC_IP_PW_CTRL0 0x00 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3547a9bad15..7080f8fabe7 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -22,11 +22,13 @@ #include <common.h> #include <cpu_func.h> #include <dm.h> +#include <dm/device_compat.h> #include <log.h> -#include <asm/byteorder.h> -#include <usb.h> #include <malloc.h> +#include <usb.h> +#include <usb/xhci.h> #include <watchdog.h> +#include <asm/byteorder.h> #include <asm/cache.h> #include <asm/unaligned.h> #include <linux/bitops.h> @@ -34,7 +36,6 @@ #include <linux/delay.h> #include <linux/errno.h> #include <linux/iopoll.h> -#include <usb/xhci.h> #ifndef CONFIG_USB_MAX_CONTROLLER_COUNT #define CONFIG_USB_MAX_CONTROLLER_COUNT 1 diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig new file mode 100644 index 00000000000..a2a59917132 --- /dev/null +++ b/drivers/usb/mtu3/Kconfig @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# For MTK USB3.0 IP + +config USB_MTU3 + bool "MediaTek USB3 Dual Role controller" + depends on USB_HOST || USB_GADGET + depends on ARCH_MEDIATEK + help + Say Y here if your system runs on MediaTek SoCs with + a Dual Role SuperSpeed USB controller. You can select the USB + mode as peripheral role or host role. + + If you don't know what this is, please say N. + +if USB_MTU3 +choice + bool "MTU3 Mode Selection" + default USB_MTU3_GADGET if USB_GADGET + default USB_MTU3_HOST if (USB_HOST && !USB_GADGET) + +config USB_MTU3_HOST + bool "Host only mode" + depends on USB_XHCI_HCD + help + Select this when you want to use MTU3 in host mode only, + and the gadget feature will be disabled. + +config USB_MTU3_GADGET + bool "Gadget only mode" + depends on USB_GADGET + select USB_GADGET_DUALSPEED + help + Select this when you want to use MTU3 in gadget mode only, + and the host feature will be disabled. + +endchoice + +config USB_MTU3_DEBUG + bool "Enable Debugging Messages" + help + Say Y here to enable debugging messages in the MTU3 Driver. + +endif diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile new file mode 100644 index 00000000000..234f3a380a3 --- /dev/null +++ b/drivers/usb/mtu3/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG + +obj-$(CONFIG_USB_MTU3) += mtu3.o + +mtu3-y := mtu3_plat.o + +obj-$(CONFIG_USB_MTU3_GADGET) += mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o +obj-$(CONFIG_USB_MTU3_GADGET) += mtu3_qmu.o +obj-$(CONFIG_USB_MTU3_HOST) += mtu3_host.o diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h new file mode 100644 index 00000000000..8a7ae83ee99 --- /dev/null +++ b/drivers/usb/mtu3/mtu3.h @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mtu3.h - MediaTek USB3 DRD header + * + * Copyright (C) 2016 MediaTek Inc. 
+ * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#ifndef __MTU3_H__ +#define __MTU3_H__ + +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <dm/devres.h> +#include <generic-phy.h> +#include <linux/bug.h> +#include <linux/delay.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/otg.h> +#include <power/regulator.h> +#include <usb/xhci.h> + +struct mtu3; +struct mtu3_ep; +struct mtu3_request; +struct mtu3_host; + +#include "mtu3_hw_regs.h" +#include "mtu3_qmu.h" + +#define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10)) + +#define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10)) +#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10)) + +#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10)) + +#define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10)) +#define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10)) + +#define SSUSB_U3_CTRL(p) (U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08)) +#define SSUSB_U2_CTRL(p) (U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08)) + +#define MTU3_DRIVER_NAME "mtu3-gadget" +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#define MTU3_EP_ENABLED BIT(0) +#define MTU3_EP_STALL BIT(1) +#define MTU3_EP_WEDGE BIT(2) +#define MTU3_EP_BUSY BIT(3) + +/* should be set to 1 */ +#define MTU3_U2_IP_SLOT_DEFAULT 1 +#define MTU3_U3_IP_SLOT_DEFAULT (MTU3_U2_IP_SLOT_DEFAULT) + +/** + * IP TRUNK version + * from version 0x1003 onwards, USB3 Gen2 is supported; two changes affect the driver: + * 1. the MAXPKT and MULTI bit layout of TXCSR1 and RXCSR1 is adjusted, + * and is not backward compatible + * 2. QMU extended buffer length is supported + */ +#define MTU3_TRUNK_VERS_1003 0x1003 + +/** + * Normally the device works at HS or SS; to simplify fifo management, + * divide the fifo into 2*maxp sized parts and use a bitmap to manage them. A + * 32-bit bitmap is large enough, which means it can manage + * up to a 32KB/64KB fifo. + * NOTE: MTU3_U2/3IP_EP_FIFO_UNIT should be a power of two; + * FIFO size is allocated according to @slot, which is 1 by default + */ +#define USB_HS_MAXP 512 +#define USB_SS_MAXP 1024 +#define MTU3_U2IP_EP_FIFO_UNIT \ + ((USB_HS_MAXP) * ((MTU3_U2_IP_SLOT_DEFAULT) + 1)) +#define MTU3_U3IP_EP_FIFO_UNIT \ + ((USB_SS_MAXP) * ((MTU3_U3_IP_SLOT_DEFAULT) + 1)) + +#define MTU3_FIFO_BIT_SIZE 32 +#define MTU3_U2_IP_EP0_FIFO_SIZE 64 + +/** + * Maximum size of the ep0 response buffer for ch9 requests; + * the SET_SEL request uses 6 bytes so far, and GET_STATUS uses 2 + */ +#define EP0_RESPONSE_BUF 6 + +/* device link and speed, read from the DEVICE_CONF register */ +enum mtu3_speed { + MTU3_SPEED_INACTIVE = 0, + MTU3_SPEED_FULL = 1, + MTU3_SPEED_HIGH = 3, + MTU3_SPEED_SUPER = 4, + MTU3_SPEED_SUPER_PLUS = 5, +}; + +/** + * @MU3D_EP0_STATE_SETUP: waits for a SETUP, or received a SETUP + * without a data stage. 
+ * @MU3D_EP0_STATE_TX: IN data stage + * @MU3D_EP0_STATE_RX: OUT data stage + * @MU3D_EP0_STATE_TX_END: the last IN data is transferred, and + * waits for its completion interrupt + * @MU3D_EP0_STATE_STALL: ep0 is in stall status, will be auto-cleared + * after a SETUP is received. + */ +enum mtu3_g_ep0_state { + MU3D_EP0_STATE_SETUP = 1, + MU3D_EP0_STATE_TX, + MU3D_EP0_STATE_RX, + MU3D_EP0_STATE_TX_END, + MU3D_EP0_STATE_STALL, +}; + +/** + * MTU3_DR_FORCE_NONE: automatically switch between host and peripheral mode + * based on the IDPIN signal. + * MTU3_DR_FORCE_HOST: force host mode, overriding the OTG + * IDPIN signal. + * MTU3_DR_FORCE_DEVICE: force peripheral mode. + */ +enum mtu3_dr_force_mode { + MTU3_DR_FORCE_NONE = 0, + MTU3_DR_FORCE_HOST, + MTU3_DR_FORCE_DEVICE, +}; + +/** + * @mac_base: register base address of MAC, which includes xHCI and device + * @ippc_base: register base address of IP Power and Clock interface (IPPC) + * @vusb33_supply: usb3.3V shared by device/host IP + * @vbus_supply: vbus 5v of OTG port + * @clks: optional clocks, including "sys_ck", "ref_ck", "mcu_ck", + * "dma_ck" and "xhci_ck" + * @phys: phys used + * @dr_mode: works in which mode: + * host only, device only or dual-role mode + */ +struct ssusb_mtk { + struct udevice *dev; + struct mtu3 *u3d; + struct mtu3_host *u3h; + void __iomem *mac_base; + void __iomem *ippc_base; + /* common power & clock */ + struct udevice *vusb33_supply; + struct udevice *vbus_supply; + struct clk_bulk clks; + struct phy_bulk phys; + /* otg */ + enum usb_dr_mode dr_mode; +}; + +/** + * @ctrl: xHCI controller, needs to come first in this struct! + * @hcd: xHCI's register base address + * @u2_ports: number of usb2 host ports + * @u3_ports: number of usb3 host ports + * @u3p_dis_msk: mask for disabling usb3 ports; for example, bit0==1 to + * disable u3port0, bit1==1 to disable u3port1, ... etc + */ +struct mtu3_host { + struct xhci_ctrl ctrl; + struct xhci_hccr *hcd; + void __iomem *ippc_base; + struct ssusb_mtk *ssusb; + struct udevice *dev; + u32 u2_ports; + u32 u3_ports; + u32 u3p_dis_msk; +}; + +/** + * @base: the base address of fifo + * @limit: the bitmap size in bits + * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT + */ +struct mtu3_fifo_info { + u32 base; + u32 limit; + DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE); +}; + +/** + * General Purpose Descriptor (GPD): + * The format of a TX GPD is a little different from an RX one, + * and the size of a GPD is 16 bytes. 
+ * + * @flag: + * bit0: Hardware Own (HWO) + * bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported + * bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1 + * bit7: Interrupt On Completion (IOC) + * @chksum: This is used to validate the contents of this GPD; + * If TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued + * when checksum validation fails; + * Checksum value is calculated over the 16 bytes of the GPD by default; + * @data_buf_len (RX ONLY): This value indicates the length of + * the assigned data buffer + * @next_gpd: Physical address of the next GPD + * @buffer: Physical address of the data buffer + * @buf_len: + * (TX): This value indicates the length of the assigned data buffer + * (RX): The total length of data received + * @ext_len: reserved + * @ext_flag: + * bit5 (TX ONLY): Zero Length Packet (ZLP), + */ +struct qmu_gpd { + __u8 flag; + __u8 chksum; + __le16 data_buf_len; + __le32 next_gpd; + __le32 buffer; + __le16 buf_len; + __u8 ext_len; + __u8 ext_flag; +} __packed; + +/** + * dma: physical base address of GPD segment + * start: virtual base address of GPD segment + * end: the last GPD element + * enqueue: the first empty GPD to use + * dequeue: the first completed GPD serviced by ISR + * NOTE: the size of GPD ring should be >= 2 + */ +struct mtu3_gpd_ring { + dma_addr_t dma; + struct qmu_gpd *start; + struct qmu_gpd *end; + struct qmu_gpd *enqueue; + struct qmu_gpd *dequeue; +}; + +/** + * @fifo_size: it is (@slot + 1) * @fifo_seg_size + * @fifo_seg_size: it is roundup_pow_of_two(@maxp) + */ +struct mtu3_ep { + struct usb_ep ep; + char name[12]; + struct mtu3 *mtu; + u8 epnum; + u8 type; + u8 is_in; + u16 maxp; + int slot; + u32 fifo_size; + u32 fifo_addr; + u32 fifo_seg_size; + struct mtu3_fifo_info *fifo; + + struct list_head req_list; + struct mtu3_gpd_ring gpd_ring; + const struct usb_ss_ep_comp_descriptor *comp_desc; + const struct usb_endpoint_descriptor *desc; + + int flags; +}; + +struct mtu3_request { + struct usb_request request; + struct list_head list; + struct mtu3_ep *mep; + struct mtu3 *mtu; + struct qmu_gpd *gpd; + int epnum; +}; + +static inline struct ssusb_mtk *dev_to_ssusb(struct udevice *dev) +{ + return dev_get_priv(dev); +} + +/** + * struct mtu3 - device driver instance data. 
+ * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP only, + * MTU3_U3_IP_SLOT_DEFAULT for U3 IP + * @may_wakeup: means device's remote wakeup is enabled + * @is_self_powered: is reported in device status and the config descriptor + * @delayed_status: true when function drivers ask for delayed status + * @gen2cp: compatible with USB3 Gen2 IP + * @ep0_req: dummy request used while handling standard USB requests + * for GET_STATUS and SET_SEL + * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests + */ +struct mtu3 { + spinlock_t lock; + struct ssusb_mtk *ssusb; + struct udevice *dev; + void __iomem *mac_base; + void __iomem *ippc_base; + int irq; + + struct mtu3_fifo_info tx_fifo; + struct mtu3_fifo_info rx_fifo; + + struct mtu3_ep *ep_array; + struct mtu3_ep *in_eps; + struct mtu3_ep *out_eps; + struct mtu3_ep *ep0; + int num_eps; + int slot; + int active_ep; + + enum mtu3_g_ep0_state ep0_state; + struct usb_gadget g; /* the gadget */ + struct usb_gadget_driver *gadget_driver; + struct mtu3_request ep0_req; + u8 setup_buf[EP0_RESPONSE_BUF]; + enum usb_device_speed max_speed; + enum usb_device_speed speed; + + unsigned is_active:1; + unsigned may_wakeup:1; + unsigned is_self_powered:1; + unsigned test_mode:1; + unsigned softconnect:1; + unsigned u1_enable:1; + unsigned u2_enable:1; + unsigned is_u3_ip:1; + unsigned delayed_status:1; + unsigned gen2cp:1; + unsigned force_vbus:1; + + u8 address; + u8 test_mode_nr; + u32 hw_version; +}; + +static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g) +{ + return container_of(g, struct mtu3, g); +} + +static inline int is_first_entry(const struct list_head *list, + const struct list_head *head) +{ + return list_is_last(head, list); +} + +static inline struct mtu3_request *to_mtu3_request(struct usb_request *req) +{ + return req ? container_of(req, struct mtu3_request, request) : NULL; +} + +static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep) +{ + return ep ? 
container_of(ep, struct mtu3_ep, ep) : NULL; +} + +static inline struct mtu3_request *next_request(struct mtu3_ep *mep) +{ + if (list_empty(&mep->req_list)) + return NULL; + + return list_first_entry(&mep->req_list, struct mtu3_request, list); +} + +static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data) +{ + writel(data, base + offset); +} + +static inline u32 mtu3_readl(void __iomem *base, u32 offset) +{ + return readl(base + offset); +} + +static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits) +{ + void __iomem *addr = base + offset; + u32 tmp = readl(addr); + + writel((tmp | (bits)), addr); +} + +static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits) +{ + void __iomem *addr = base + offset; + u32 tmp = readl(addr); + + writel((tmp & ~(bits)), addr); +} + +int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks); +struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +void mtu3_free_request(struct usb_ep *ep, struct usb_request *req); +void mtu3_req_complete(struct mtu3_ep *mep, + struct usb_request *req, int status); + +int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + int interval, int burst, int mult); +void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep); +void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set); +void mtu3_ep0_setup(struct mtu3 *mtu); +void mtu3_start(struct mtu3 *mtu); +void mtu3_stop(struct mtu3 *mtu); +void mtu3_dev_on_off(struct mtu3 *mtu, int is_on); +void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed); + +int mtu3_gadget_setup(struct mtu3 *mtu); +void mtu3_gadget_cleanup(struct mtu3 *mtu); +void mtu3_gadget_reset(struct mtu3 *mtu); +void mtu3_gadget_suspend(struct mtu3 *mtu); +void mtu3_gadget_resume(struct mtu3 *mtu); +void mtu3_gadget_disconnect(struct mtu3 *mtu); + +irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu); +extern const struct usb_ep_ops mtu3_ep0_ops; + +#endif diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c new file mode 100644 index 00000000000..28136f88f45 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_core.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mtu3_core.c - hardware access layer and gadget init/exit of + * MediaTek usb3 Dual-Role Controller Driver + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#include <linux/log2.h> +#include <linux/bitmap.h> + +#include "mtu3.h" +#include "mtu3_dr.h" + +static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size) +{ + struct mtu3_fifo_info *fifo = mep->fifo; + struct mtu3 *mtu = mep->mtu; + u32 fz_bit; + + mep->fifo_seg_size = mtu->is_u3_ip ? 
USB_SS_MAXP : USB_HS_MAXP; + + fz_bit = find_first_zero_bit(fifo->bitmap, fifo->limit); + if (fz_bit >= fifo->limit) + return -EOVERFLOW; + + mep->fifo_size = mep->fifo_seg_size * (mep->slot + 1); + mep->fifo_addr = fifo->base + mep->fifo_size * fz_bit; + generic_set_bit(fz_bit, fifo->bitmap); + + dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, bit: %d\n", + __func__, mep->fifo_seg_size, mep->fifo_size, fz_bit); + + return mep->fifo_addr; +} + +static void ep_fifo_free(struct mtu3_ep *mep) +{ + struct mtu3_fifo_info *fifo = mep->fifo; + u32 addr = mep->fifo_addr; + u32 bit; + + if (unlikely(addr < fifo->base)) + return; + + bit = (addr - fifo->base) / mep->fifo_size; + generic_clear_bit(bit, fifo->bitmap); + dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, bit: %d\n", + __func__, mep->fifo_seg_size, mep->fifo_size, bit); + + mep->fifo_size = 0; + mep->fifo_seg_size = 0; +} + +/* enable/disable U3D SS function */ +static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable) +{ + /* If usb3_en==0, LTSSM will go to SS.Disable state */ + if (enable) + mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); + else + mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); + + dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable); +} + +/* set/clear U3D HS device soft connect */ +static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable) +{ + if (enable) { + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, + SOFT_CONN | SUSPENDM_ENABLE); + } else { + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, + SOFT_CONN | SUSPENDM_ENABLE); + } + dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable); +} + +/* only port0 of U2/U3 supports device mode */ +static int mtu3_device_enable(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + u32 check_clk = 0; + + mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); + + if (mtu->is_u3_ip) { + check_clk = SSUSB_U3_MAC_RST_B_STS; + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), + (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN | + SSUSB_U3_PORT_HOST_SEL)); + } + mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), + (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | + SSUSB_U2_PORT_HOST_SEL)); + + return ssusb_check_clocks(mtu->ssusb, check_clk); +} + +static void mtu3_device_disable(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + + if (mtu->is_u3_ip) + mtu3_setbits(ibase, SSUSB_U3_CTRL(0), + (SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN)); + + mtu3_setbits(ibase, SSUSB_U2_CTRL(0), + SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); + + mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); +} + +/* reset U3D's device module. 
*/ +static void mtu3_device_reset(struct mtu3 *mtu) +{ + void __iomem *ibase = mtu->ippc_base; + + mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); + udelay(1); + mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST); +} + +static void mtu3_intr_status_clear(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + /* Clear EP0 and Tx/Rx EPn interrupts status */ + mtu3_writel(mbase, U3D_EPISR, ~0x0); + /* Clear U2 USB common interrupts status */ + mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0); + /* Clear U3 LTSSM interrupts status */ + mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0); + /* Clear speed change interrupt status */ + mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0); + /* Clear QMU interrupt status */ + mtu3_writel(mbase, U3D_QISAR0, ~0x0); +} + +/* disable all interrupts */ +static void mtu3_intr_disable(struct mtu3 *mtu) +{ + /* Disable level 1 interrupts */ + mtu3_writel(mtu->mac_base, U3D_LV1IECR, ~0x0); + /* Disable endpoint interrupts */ + mtu3_writel(mtu->mac_base, U3D_EPIECR, ~0x0); + mtu3_intr_status_clear(mtu); +} + +/* enable system global interrupt */ +static void mtu3_intr_enable(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 value; + + /*Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */ + value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR; + mtu3_writel(mbase, U3D_LV1IESR, value); + + /* Enable U2 common USB interrupts */ + value = SUSPEND_INTR | RESUME_INTR | RESET_INTR; + mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value); + + if (mtu->is_u3_ip) { + /* Enable U3 LTSSM interrupts */ + value = HOT_RST_INTR | WARM_RST_INTR | + ENTER_U3_INTR | EXIT_U3_INTR; + mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value); + } + + /* Enable QMU interrupts. */ + value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT | + RXQ_LENERR_INT | RXQ_ZLPERR_INT; + mtu3_writel(mbase, U3D_QIESR1, value); + + /* Enable speed change interrupt */ + mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR); +} + +void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed) +{ + void __iomem *mbase = mtu->mac_base; + + if (speed > mtu->max_speed) + speed = mtu->max_speed; + + switch (speed) { + case USB_SPEED_FULL: + /* disable U3 SS function */ + mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN); + /* disable HS function */ + mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); + break; + case USB_SPEED_HIGH: + mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN); + /* HS/FS detected by HW */ + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE); + break; + case USB_SPEED_SUPER: + mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_SSP_SPEED); + break; + case USB_SPEED_SUPER_PLUS: + mtu3_setbits(mtu->ippc_base, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_SSP_SPEED); + break; + default: + dev_err(mtu->dev, "invalid speed: %d\n", speed); + return; + } + + mtu->speed = speed; + dev_dbg(mtu->dev, "set speed: %s\n", usb_speed_string(mtu->speed)); +} + +/* reset: u2 - data toggle, u3 - SeqN, flow control status etc */ +static void mtu3_ep_reset(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + u32 rst_bit = EP_RST(mep->is_in, mep->epnum); + + mtu3_setbits(mtu->mac_base, U3D_EP_RST, rst_bit); + mtu3_clrbits(mtu->mac_base, U3D_EP_RST, rst_bit); +} + +/* set/clear the stall and toggle bits for non-ep0 */ +void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + u8 epnum = mep->epnum; + u32 csr; + + if (mep->is_in) { /* TX */ + csr = mtu3_readl(mbase, 
MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS; + if (set) + csr |= TX_SENDSTALL; + else + csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL; + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr); + } else { /* RX */ + csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS; + if (set) + csr |= RX_SENDSTALL; + else + csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL; + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr); + } + + if (!set) { + mtu3_ep_reset(mep); + mep->flags &= ~MTU3_EP_STALL; + } else { + mep->flags |= MTU3_EP_STALL; + } + + dev_dbg(mtu->dev, "%s: %s\n", mep->name, + set ? "SEND STALL" : "CLEAR STALL, with EP RESET"); +} + +void mtu3_dev_on_off(struct mtu3 *mtu, int is_on) +{ + if (mtu->is_u3_ip && mtu->speed >= USB_SPEED_SUPER) + mtu3_ss_func_set(mtu, is_on); + else + mtu3_hs_softconn_set(mtu, is_on); + + dev_info(mtu->dev, "gadget (%s) pullup D%s\n", + usb_speed_string(mtu->speed), is_on ? "+" : "-"); +} + +void mtu3_start(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__, + mtu3_readl(mbase, U3D_DEVICE_CONTROL)); + + /* Initialize the default interrupts */ + mtu3_intr_enable(mtu); + mtu->is_active = 1; + + if (mtu->softconnect) + mtu3_dev_on_off(mtu, 1); +} + +void mtu3_stop(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "%s\n", __func__); + + mtu3_intr_disable(mtu); + + if (mtu->softconnect) + mtu3_dev_on_off(mtu, 0); + + mtu->is_active = 0; +} + +/* for non-ep0 */ +int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + int interval, int burst, int mult) +{ + void __iomem *mbase = mtu->mac_base; + bool gen2cp = mtu->gen2cp; + int epnum = mep->epnum; + u32 csr0, csr1, csr2; + int fifo_sgsz, fifo_addr; + int num_pkts; + + fifo_addr = ep_fifo_alloc(mep, mep->maxp); + if (fifo_addr < 0) { + dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp); + return -ENOMEM; + } + fifo_sgsz = ilog2(mep->fifo_seg_size); + dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz, + mep->fifo_seg_size, mep->fifo_size); + + if (mep->is_in) { + csr0 = TX_TXMAXPKTSZ(mep->maxp); + csr0 |= TX_DMAREQEN; + + num_pkts = (burst + 1) * (mult + 1) - 1; + csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot); + csr1 |= TX_MAX_PKT(gen2cp, num_pkts) | TX_MULT(gen2cp, mult); + + csr2 = TX_FIFOADDR(fifo_addr >> 4); + csr2 |= TX_FIFOSEGSIZE(fifo_sgsz); + + switch (mep->type) { + case USB_ENDPOINT_XFER_BULK: + csr1 |= TX_TYPE(TYPE_BULK); + break; + case USB_ENDPOINT_XFER_ISOC: + csr1 |= TX_TYPE(TYPE_ISO); + csr2 |= TX_BINTERVAL(interval); + break; + case USB_ENDPOINT_XFER_INT: + csr1 |= TX_TYPE(TYPE_INT); + csr2 |= TX_BINTERVAL(interval); + break; + } + + /* Enable QMU Done interrupt */ + mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum)); + + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0); + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1); + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2); + + dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", + epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)), + mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)), + mtu3_readl(mbase, MU3D_EP_TXCR2(epnum))); + } else { + csr0 = RX_RXMAXPKTSZ(mep->maxp); + csr0 |= RX_DMAREQEN; + + num_pkts = (burst + 1) * (mult + 1) - 1; + csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot); + csr1 |= RX_MAX_PKT(gen2cp, num_pkts) | RX_MULT(gen2cp, mult); + + csr2 = RX_FIFOADDR(fifo_addr >> 4); + csr2 |= RX_FIFOSEGSIZE(fifo_sgsz); + + switch (mep->type) { + case USB_ENDPOINT_XFER_BULK: + csr1 |= RX_TYPE(TYPE_BULK); + break; + case USB_ENDPOINT_XFER_ISOC: + csr1 |= RX_TYPE(TYPE_ISO); + csr2 |= 
RX_BINTERVAL(interval); + break; + case USB_ENDPOINT_XFER_INT: + csr1 |= RX_TYPE(TYPE_INT); + csr2 |= RX_BINTERVAL(interval); + break; + } + + /* Enable QMU Done interrupt */ + mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum)); + + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0); + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1); + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2); + + dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n", + epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)), + mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)), + mtu3_readl(mbase, MU3D_EP_RXCR2(epnum))); + } + + dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2); + dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n", + __func__, mep->name, mep->fifo_addr, mep->fifo_size, + fifo_sgsz, mep->fifo_seg_size); + + return 0; +} + +/* for non-ep0 */ +void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep) +{ + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + + if (mep->is_in) { + mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0); + mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0); + mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0); + mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum)); + } else { + mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0); + mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0); + mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0); + mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum)); + } + + mtu3_ep_reset(mep); + ep_fifo_free(mep); + + dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name); +} + +/* + * Two scenarios: + * 1. when the device IP supports SS, the fifos of EP0, TX EPs and RX EPs + * are separated; + * 2. when it supports only HS, the fifo is shared by all EPs, and + * the capability registers @EPNTXFFSZ or @EPNRXFFSZ indicate + * the total fifo size of non-ep0, while ep0's is fixed to 64B, + * so the total fifo size is 64B + @EPNTXFFSZ; + * Since the first 64B are reserved for EP0, the non-ep0 fifo + * starts at offset 64 and is divided into two equal parts for + * TX and RX EPs for simplification. 
+ */ +static void get_ep_fifo_config(struct mtu3 *mtu) +{ + struct mtu3_fifo_info *tx_fifo; + struct mtu3_fifo_info *rx_fifo; + u32 fifosize; + + if (mtu->is_u3_ip) { + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ); + tx_fifo = &mtu->tx_fifo; + tx_fifo->base = 0; + tx_fifo->limit = fifosize / MTU3_U3IP_EP_FIFO_UNIT; + bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ); + rx_fifo = &mtu->rx_fifo; + rx_fifo->base = 0; + rx_fifo->limit = fifosize / MTU3_U3IP_EP_FIFO_UNIT; + bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + mtu->slot = MTU3_U3_IP_SLOT_DEFAULT; + } else { + fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ); + tx_fifo = &mtu->tx_fifo; + tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE; + tx_fifo->limit = (fifosize / MTU3_U2IP_EP_FIFO_UNIT) >> 1; + bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + + rx_fifo = &mtu->rx_fifo; + rx_fifo->base = tx_fifo->base + + tx_fifo->limit * MTU3_U2IP_EP_FIFO_UNIT; + rx_fifo->limit = tx_fifo->limit; + bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE); + mtu->slot = MTU3_U2_IP_SLOT_DEFAULT; + } + + dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n", + __func__, tx_fifo->base, tx_fifo->limit, + rx_fifo->base, rx_fifo->limit); +} + +void mtu3_ep0_setup(struct mtu3 *mtu) +{ + u32 maxpacket = mtu->g.ep0->maxpacket; + u32 csr; + + dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket); + + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR); + csr &= ~EP0_MAXPKTSZ_MSK; + csr |= EP0_MAXPKTSZ(maxpacket); + csr &= EP0_W1C_BITS; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); + + /* Enable EP0 interrupt */ + mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR | SETUPENDISR); +} + +static int mtu3_mem_alloc(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + struct mtu3_ep *ep_array; + int in_ep_num, out_ep_num; + u32 cap_epinfo; + int i; + + cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO); + in_ep_num = CAP_TX_EP_NUM(cap_epinfo); + out_ep_num = CAP_RX_EP_NUM(cap_epinfo); + + dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n", + mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num, + mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num); + + /* one for ep0, another is reserved */ + mtu->num_eps = min(in_ep_num, out_ep_num) + 1; + ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL); + if (!ep_array) + return -ENOMEM; + + mtu->ep_array = ep_array; + mtu->in_eps = ep_array; + mtu->out_eps = &ep_array[mtu->num_eps]; + /* ep0 uses in_eps[0], out_eps[0] is reserved */ + mtu->ep0 = mtu->in_eps; + mtu->ep0->mtu = mtu; + mtu->ep0->epnum = 0; + + for (i = 1; i < mtu->num_eps; i++) { + struct mtu3_ep *mep = mtu->in_eps + i; + + mep->fifo = &mtu->tx_fifo; + mep = mtu->out_eps + i; + mep->fifo = &mtu->rx_fifo; + } + + get_ep_fifo_config(mtu); + mtu3_qmu_init(mtu); + + return 0; +} + +static void mtu3_mem_free(struct mtu3 *mtu) +{ + mtu3_qmu_exit(mtu); + kfree(mtu->ep_array); +} + +static void mtu3_regs_init(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + + /* be sure interrupts are disabled before registration of ISR */ + mtu3_intr_disable(mtu); + + if (mtu->is_u3_ip) { + /* disable LGO_U1/U2 by default */ + mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL, + SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE); + /* accept LGO_U1/U2 link commands from the host */ + mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL, + SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE); + /* device responds to u3_exit from the host automatically */ + mtu3_clrbits(mbase, U3D_LTSSM_CTRL, 
+		/* device responds to u3_exit from the host automatically */
+		mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+		/* automatically build U2 link when U3 detection fails */
+		mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+		/* auto-clear SOFT_CONN when USB3_EN is cleared while working as HS */
+		mtu3_setbits(mbase, U3D_U3U2_SWITCH_CTRL, SOFTCON_CLR_AUTO_EN);
+	}
+
+	/* delay about 0.1us from detecting reset to send chirp-K */
+	mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
+	/* U2/U3 detected by HW */
+	mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
+	/* enable automatic hardware remote wakeup from L1 */
+	mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
+
+	mtu3_set_speed(mtu, mtu->max_speed);
+	ssusb_set_force_mode(mtu->ssusb, MTU3_DR_FORCE_DEVICE);
+
+	if (mtu->force_vbus)
+		mtu3_setbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+	else	/* vbus detected by HW */
+		mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+}
+
+static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	enum usb_device_speed udev_speed;
+	u32 maxpkt = 64;
+	u32 link;
+	u32 speed;
+
+	link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
+	link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
+	dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link);
+
+	if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
+		return IRQ_NONE;
+
+	speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
+
+	switch (speed) {
+	case MTU3_SPEED_FULL:
+		udev_speed = USB_SPEED_FULL;
+		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+		mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+			    | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+		mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+			     LPM_BESL_STALL | LPM_BESLD_STALL);
+		break;
+	case MTU3_SPEED_HIGH:
+		udev_speed = USB_SPEED_HIGH;
+		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+		mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+			    | LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+		mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+			     LPM_BESL_STALL | LPM_BESLD_STALL);
+		break;
+	case MTU3_SPEED_SUPER:
+		udev_speed = USB_SPEED_SUPER;
+		maxpkt = 512;
+		break;
+	case MTU3_SPEED_SUPER_PLUS:
+		udev_speed = USB_SPEED_SUPER_PLUS;
+		maxpkt = 512;
+		break;
+	default:
+		udev_speed = USB_SPEED_UNKNOWN;
+		break;
+	}
+	dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+
+	mtu->g.speed = udev_speed;
+	mtu->g.ep0->maxpacket = maxpkt;
+	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+	if (udev_speed == USB_SPEED_UNKNOWN)
+		mtu3_gadget_disconnect(mtu);
+	else
+		mtu3_ep0_setup(mtu);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 ltssm;
+
+	ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
+	ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
+	dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+
+	if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
+		mtu3_gadget_reset(mtu);
+
+	if (ltssm & VBUS_FALL_INTR) {
+		mtu3_ss_func_set(mtu, false);
+		mtu3_gadget_reset(mtu);
+	}
+
+	if (ltssm & VBUS_RISE_INTR)
+		mtu3_ss_func_set(mtu, true);
+
+	if (ltssm & EXIT_U3_INTR)
+		mtu3_gadget_resume(mtu);
+
+	if (ltssm & ENTER_U3_INTR)
+		mtu3_gadget_suspend(mtu);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 u2comm;
+
+	u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
+	u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
+	dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+
+	if
(u2comm & SUSPEND_INTR) + mtu3_gadget_suspend(mtu); + + if (u2comm & RESUME_INTR) + mtu3_gadget_resume(mtu); + + if (u2comm & RESET_INTR) + mtu3_gadget_reset(mtu); + + return IRQ_HANDLED; +} + +irqreturn_t mtu3_irq(int irq, void *data) +{ + struct mtu3 *mtu = (struct mtu3 *)data; + unsigned long flags; + u32 level1; + + spin_lock_irqsave(&mtu->lock, flags); + + /* U3D_LV1ISR is RU */ + level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR); + level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER); + + if (level1 & EP_CTRL_INTR) + mtu3_link_isr(mtu); + + if (level1 & MAC2_INTR) + mtu3_u2_common_isr(mtu); + + if (level1 & MAC3_INTR) + mtu3_u3_ltssm_isr(mtu); + + if (level1 & BMU_INTR) + mtu3_ep0_isr(mtu); + + if (level1 & QMU_INTR) + mtu3_qmu_isr(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return IRQ_HANDLED; +} + +static void mtu3_check_params(struct mtu3 *mtu) +{ + /* check the max_speed parameter */ + switch (mtu->max_speed) { + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + break; + default: + dev_err(mtu->dev, "invalid max_speed: %d\n", mtu->max_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default as SS */ + mtu->max_speed = USB_SPEED_SUPER; + break; + } + + if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH)) + mtu->max_speed = USB_SPEED_HIGH; + + mtu->speed = mtu->max_speed; + + dev_info(mtu->dev, "max_speed: %s\n", usb_speed_string(mtu->speed)); +} + +static int mtu3_hw_init(struct mtu3 *mtu) +{ + u32 value; + int ret; + + value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_TRUNK_VERS); + mtu->hw_version = IP_TRUNK_VERS(value); + mtu->gen2cp = !!(mtu->hw_version >= MTU3_TRUNK_VERS_1003); + + value = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP); + mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(value); + + dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version, + mtu->is_u3_ip ? "U3" : "U2"); + + mtu3_check_params(mtu); + + mtu3_device_reset(mtu); + + ret = mtu3_device_enable(mtu); + if (ret) { + dev_err(mtu->dev, "device enable failed %d\n", ret); + return ret; + } + + ret = mtu3_mem_alloc(mtu); + if (ret) + return ret; + + mtu3_regs_init(mtu); + + return 0; +} + +static void mtu3_hw_exit(struct mtu3 *mtu) +{ + mtu3_device_disable(mtu); + mtu3_mem_free(mtu); +} + +int ssusb_gadget_init(struct ssusb_mtk *ssusb) +{ + struct mtu3 *mtu = ssusb->u3d; + struct udevice *dev = mtu->dev; + int ret = -ENOMEM; + + spin_lock_init(&mtu->lock); + mtu->ippc_base = ssusb->ippc_base; + mtu->mac_base = ssusb->mac_base; + mtu->ssusb = ssusb; + mtu->max_speed = usb_get_maximum_speed(dev->node); + mtu->force_vbus = dev_read_bool(dev, "mediatek,force-vbus"); + + ret = mtu3_hw_init(mtu); + if (ret) { + dev_err(dev, "mtu3 hw init failed:%d\n", ret); + return ret; + } + + ret = mtu3_gadget_setup(mtu); + if (ret) { + dev_err(dev, "mtu3 gadget init failed:%d\n", ret); + goto gadget_err; + } + + dev_info(dev, "%s() done...\n", __func__); + + return 0; + +gadget_err: + mtu3_hw_exit(mtu); + ssusb->u3d = NULL; + dev_err(dev, "%s() fail...\n", __func__); + + return ret; +} + +void ssusb_gadget_exit(struct ssusb_mtk *ssusb) +{ + struct mtu3 *mtu = ssusb->u3d; + + mtu3_gadget_cleanup(mtu); + mtu3_hw_exit(mtu); +} diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h new file mode 100644 index 00000000000..ec0e50c04ca --- /dev/null +++ b/drivers/usb/mtu3/mtu3_dr.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mtu3_dr.h - dual role switch and host glue layer header + * + * Copyright (C) 2016 MediaTek Inc. 
+ * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#ifndef _MTU3_DR_H_ +#define _MTU3_DR_H_ + +#if IS_ENABLED(CONFIG_USB_MTU3_HOST) + +int ssusb_host_init(struct ssusb_mtk *ssusb); +void ssusb_host_exit(struct ssusb_mtk *ssusb); + +#else + +static inline int ssusb_host_init(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline void ssusb_host_exit(struct ssusb_mtk *ssusb) +{} + +#endif + +#if IS_ENABLED(CONFIG_USB_MTU3_GADGET) +int ssusb_gadget_init(struct ssusb_mtk *ssusb); +void ssusb_gadget_exit(struct ssusb_mtk *ssusb); +irqreturn_t mtu3_irq(int irq, void *data); +#else +static inline int ssusb_gadget_init(struct ssusb_mtk *ssusb) +{ + return 0; +} + +static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb) +{} + +static inline irqreturn_t mtu3_irq(int irq, void *data) +{ + return IRQ_NONE; +} +#endif + +void ssusb_set_force_mode(struct ssusb_mtk *ssusb, + enum mtu3_dr_force_mode mode); + +#endif /* _MTU3_DR_H_ */ diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c new file mode 100644 index 00000000000..027b7e61113 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mtu3_gadget.c - MediaTek usb3 DRD peripheral support + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#include "mtu3.h" + +void mtu3_req_complete(struct mtu3_ep *mep, + struct usb_request *req, int status) +__releases(mep->mtu->lock) +__acquires(mep->mtu->lock) +{ + struct mtu3_request *mreq = to_mtu3_request(req); + struct mtu3 *mtu = mreq->mtu; + + list_del(&mreq->list); + if (req->status == -EINPROGRESS) + req->status = status; + + spin_unlock(&mtu->lock); + + /* ep0 makes use of PIO, needn't unmap it */ + if (mep->epnum) + usb_gadget_unmap_request(&mtu->g, req, mep->is_in); + + dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", + mep->name, req, req->status, req->actual, req->length); + + usb_gadget_giveback_request(&mep->ep, req); + spin_lock(&mtu->lock); +} + +static void nuke(struct mtu3_ep *mep, const int status) +{ + struct mtu3_request *mreq = NULL; + + if (list_empty(&mep->req_list)) + return; + + dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); + + /* exclude EP0 */ + if (mep->epnum) + mtu3_qmu_flush(mep); + + while (!list_empty(&mep->req_list)) { + mreq = list_first_entry(&mep->req_list, + struct mtu3_request, list); + mtu3_req_complete(mep, &mreq->request, status); + } +} + +static int mtu3_ep_enable(struct mtu3_ep *mep) +{ + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; + struct mtu3 *mtu = mep->mtu; + u32 interval = 0; + u32 mult = 0; + u32 burst = 0; + int max_packet; + int ret; + + desc = mep->desc; + comp_desc = mep->comp_desc; + mep->type = usb_endpoint_type(desc); + max_packet = usb_endpoint_maxp(desc); + mep->maxp = max_packet & GENMASK(10, 0); + + switch (mtu->g.speed) { + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + if (usb_endpoint_xfer_int(desc) || + usb_endpoint_xfer_isoc(desc)) { + interval = desc->bInterval; + interval = clamp_val(interval, 1, 16) - 1; + if (usb_endpoint_xfer_isoc(desc) && comp_desc) + mult = comp_desc->bmAttributes; + } + if (comp_desc) + burst = comp_desc->bMaxBurst; + + break; + case USB_SPEED_HIGH: + if (usb_endpoint_xfer_isoc(desc) || + usb_endpoint_xfer_int(desc)) { + interval = desc->bInterval; + interval = clamp_val(interval, 1, 16) - 1; + burst = (max_packet & GENMASK(12, 11)) >> 11; + } + break; + default: + break; 
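+		/*
+		 * Editor's worked example (hypothetical values): a HS
+		 * isochronous endpoint with wMaxPacketSize = 0x1400 decodes
+		 * above as
+		 *   maxp  = 0x1400 & GENMASK(10, 0)           = 1024 bytes
+		 *   burst = (0x1400 & GENMASK(12, 11)) >> 11  = 2 extra
+		 *           transactions per microframe
+		 * and bInterval = 4 gives
+		 *   interval = clamp_val(4, 1, 16) - 1 = 3.
+		 */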
/*others are ignored */ + } + + dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n", + __func__, mep->maxp, interval, burst, mult); + + mep->ep.maxpacket = mep->maxp; + mep->ep.desc = desc; + mep->ep.comp_desc = comp_desc; + mep->slot = mtu->slot; + + ret = mtu3_config_ep(mtu, mep, interval, burst, mult); + if (ret < 0) + return ret; + + ret = mtu3_gpd_ring_alloc(mep); + if (ret < 0) { + mtu3_deconfig_ep(mtu, mep); + return ret; + } + + mtu3_qmu_start(mep); + + return 0; +} + +static int mtu3_ep_disable(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + + mtu3_qmu_stop(mep); + + /* abort all pending requests */ + nuke(mep, -ESHUTDOWN); + mtu3_deconfig_ep(mtu, mep); + mtu3_gpd_ring_free(mep); + + mep->desc = NULL; + mep->ep.desc = NULL; + mep->comp_desc = NULL; + mep->type = 0; + mep->flags = 0; + + return 0; +} + +static int mtu3_gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct mtu3_ep *mep; + struct mtu3 *mtu; + unsigned long flags; + int ret = -EINVAL; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { + pr_debug("%s invalid parameters\n", __func__); + return -EINVAL; + } + + if (!desc->wMaxPacketSize) { + pr_debug("%s missing wMaxPacketSize\n", __func__); + return -EINVAL; + } + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + + /* check ep number and direction against endpoint */ + if (usb_endpoint_num(desc) != mep->epnum) + return -EINVAL; + + if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in) + return -EINVAL; + + dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name); + + if (mep->flags & MTU3_EP_ENABLED) { + dev_warn(mtu->dev, "%s is already enabled\n", mep->name); + return 0; + } + + spin_lock_irqsave(&mtu->lock, flags); + mep->desc = desc; + mep->comp_desc = ep->comp_desc; + + ret = mtu3_ep_enable(mep); + if (ret) + goto error; + + mep->flags = MTU3_EP_ENABLED; + mtu->active_ep++; + +error: + spin_unlock_irqrestore(&mtu->lock, flags); + + dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep); + + return ret; +} + +static int mtu3_gadget_ep_disable(struct usb_ep *ep) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3 *mtu = mep->mtu; + unsigned long flags; + + dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name); + + if (!(mep->flags & MTU3_EP_ENABLED)) { + dev_warn(mtu->dev, "%s is already disabled\n", mep->name); + return 0; + } + + spin_lock_irqsave(&mtu->lock, flags); + mtu3_ep_disable(mep); + mep->flags = 0; + mtu->active_ep--; + spin_unlock_irqrestore(&mtu->lock, flags); + + dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n", + __func__, mtu->active_ep, mtu->is_active); + + return 0; +} + +struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3_request *mreq; + + mreq = kzalloc(sizeof(*mreq), gfp_flags); + if (!mreq) + return NULL; + + mreq->request.dma = DMA_ADDR_INVALID; + mreq->epnum = mep->epnum; + mreq->mep = mep; + + return &mreq->request; +} + +void mtu3_free_request(struct usb_ep *ep, struct usb_request *req) +{ + struct mtu3_request *mreq = to_mtu3_request(req); + + kfree(mreq); +} + +static int mtu3_gadget_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp_flags) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3_request *mreq = to_mtu3_request(req); + struct mtu3 *mtu = mep->mtu; + unsigned long flags; + int ret = 0; + + if (!req->buf) + return -ENODATA; + + if (mreq->mep != mep) + return -EINVAL; + + dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n", + __func__, mep->is_in ? 
"TX" : "RX", mreq->epnum, ep->name, + mreq, ep->maxpacket, mreq->request.length); + + if (req->length > GPD_BUF_SIZE) { + dev_warn(mtu->dev, + "req length > supported MAX:%d requested:%d\n", + GPD_BUF_SIZE, req->length); + return -EOPNOTSUPP; + } + + /* don't queue if the ep is down */ + if (!mep->desc) { + dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n", + req, ep->name); + return -ESHUTDOWN; + } + + mreq->mtu = mtu; + mreq->request.actual = 0; + mreq->request.status = -EINPROGRESS; + + ret = usb_gadget_map_request(&mtu->g, req, mep->is_in); + if (ret) { + dev_err(mtu->dev, "dma mapping failed\n"); + return ret; + } + + spin_lock_irqsave(&mtu->lock, flags); + + if (mtu3_prepare_transfer(mep)) { + ret = -EAGAIN; + goto error; + } + + list_add_tail(&mreq->list, &mep->req_list); + mtu3_insert_gpd(mep, mreq); + mtu3_qmu_resume(mep); + +error: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3_request *mreq = to_mtu3_request(req); + struct mtu3_request *r; + struct mtu3 *mtu = mep->mtu; + unsigned long flags; + int ret = 0; + + if (mreq->mep != mep) + return -EINVAL; + + dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req); + + spin_lock_irqsave(&mtu->lock, flags); + + list_for_each_entry(r, &mep->req_list, list) { + if (r == mreq) + break; + } + if (r != mreq) { + dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name); + ret = -EINVAL; + goto done; + } + + mtu3_qmu_flush(mep); /* REVISIT: set BPS ?? */ + mtu3_req_complete(mep, req, -ECONNRESET); + mtu3_qmu_start(mep); + +done: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +/* + * Set or clear the halt bit of an EP. + * A halted EP won't TX/RX any data but will queue requests. + */ +static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + struct mtu3 *mtu = mep->mtu; + struct mtu3_request *mreq; + unsigned long flags = 0; + int ret = 0; + + dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name); + + spin_lock_irqsave(&mtu->lock, flags); + + if (mep->type == USB_ENDPOINT_XFER_ISOC) { + ret = -EINVAL; + goto done; + } + + mreq = next_request(mep); + if (value) { + /* + * If there is not request for TX-EP, QMU will not transfer + * data to TX-FIFO, so no need check whether TX-FIFO + * holds bytes or not here + */ + if (mreq) { + dev_dbg(mtu->dev, "req in progress, cannot halt %s\n", + ep->name); + ret = -EAGAIN; + goto done; + } + } else { + mep->flags &= ~MTU3_EP_WEDGE; + } + + dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? 
"set" : "clear"); + + mtu3_ep_stall_set(mep, value); + +done: + spin_unlock_irqrestore(&mtu->lock, flags); + + return ret; +} + +/* Sets the halt feature with the clear requests ignored */ +static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct mtu3_ep *mep = to_mtu3_ep(ep); + + mep->flags |= MTU3_EP_WEDGE; + + return usb_ep_set_halt(ep); +} + +static const struct usb_ep_ops mtu3_ep_ops = { + .enable = mtu3_gadget_ep_enable, + .disable = mtu3_gadget_ep_disable, + .alloc_request = mtu3_alloc_request, + .free_request = mtu3_free_request, + .queue = mtu3_gadget_queue, + .dequeue = mtu3_gadget_dequeue, + .set_halt = mtu3_gadget_ep_set_halt, + .set_wedge = mtu3_gadget_ep_set_wedge, +}; + +static int mtu3_gadget_get_frame(struct usb_gadget *gadget) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + + return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM); +} + +static int mtu3_gadget_wakeup(struct usb_gadget *gadget) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + dev_dbg(mtu->dev, "%s\n", __func__); + + /* remote wakeup feature is not enabled by host */ + if (!mtu->may_wakeup) + return -EOPNOTSUPP; + + spin_lock_irqsave(&mtu->lock, flags); + if (mtu->g.speed >= USB_SPEED_SUPER) { + mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT); + } else { + mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); + spin_unlock_irqrestore(&mtu->lock, flags); + mdelay(10); + spin_lock_irqsave(&mtu->lock, flags); + mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME); + } + spin_unlock_irqrestore(&mtu->lock, flags); + return 0; +} + +static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget, + int is_selfpowered) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + + mtu->is_self_powered = !!is_selfpowered; + return 0; +} + +static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__, + is_on ? "on" : "off", mtu->is_active ? "" : "in"); + + /* we'd rather not pullup unless the device is active. 
*/ + spin_lock_irqsave(&mtu->lock, flags); + + is_on = !!is_on; + if (!mtu->is_active) { + /* save it for mtu3_start() to process the request */ + mtu->softconnect = is_on; + } else if (is_on != mtu->softconnect) { + mtu->softconnect = is_on; + mtu3_dev_on_off(mtu, is_on); + } + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static int mtu3_gadget_start(struct usb_gadget *gadget, + struct usb_gadget_driver *driver) +{ + struct mtu3 *mtu = gadget_to_mtu3(gadget); + unsigned long flags; + + if (mtu->gadget_driver) { + dev_err(mtu->dev, "%s is already bound to %s\n", + mtu->g.name, mtu->gadget_driver->function); + return -EBUSY; + } + + dev_dbg(mtu->dev, "bind driver %s\n", driver->function); + + spin_lock_irqsave(&mtu->lock, flags); + + mtu->softconnect = 0; + mtu->gadget_driver = driver; + mtu3_start(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static void stop_activity(struct mtu3 *mtu) +{ + int i; + + mtu->g.speed = USB_SPEED_UNKNOWN; + + /* deactivate the hardware */ + if (mtu->softconnect) { + mtu->softconnect = 0; + mtu3_dev_on_off(mtu, 0); + } + + /* + * killing any outstanding requests will quiesce the driver; + * then report disconnect + */ + nuke(mtu->ep0, -ESHUTDOWN); + for (i = 1; i < mtu->num_eps; i++) { + nuke(mtu->in_eps + i, -ESHUTDOWN); + nuke(mtu->out_eps + i, -ESHUTDOWN); + } +} + +static int mtu3_gadget_stop(struct usb_gadget *g) +{ + struct mtu3 *mtu = gadget_to_mtu3(g); + unsigned long flags; + + dev_dbg(mtu->dev, "%s\n", __func__); + + spin_lock_irqsave(&mtu->lock, flags); + + stop_activity(mtu); + mtu->gadget_driver = NULL; + mtu3_stop(mtu); + + spin_unlock_irqrestore(&mtu->lock, flags); + + return 0; +} + +static void +mtu3_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed) +{ + struct mtu3 *mtu = gadget_to_mtu3(g); + unsigned long flags; + + dev_dbg(mtu->dev, "%s %d\n", __func__, speed); + + spin_lock_irqsave(&mtu->lock, flags); + mtu3_set_speed(mtu, speed); + spin_unlock_irqrestore(&mtu->lock, flags); +} + +static const struct usb_gadget_ops mtu3_gadget_ops = { + .get_frame = mtu3_gadget_get_frame, + .wakeup = mtu3_gadget_wakeup, + .set_selfpowered = mtu3_gadget_set_self_powered, + .pullup = mtu3_gadget_pullup, + .udc_start = mtu3_gadget_start, + .udc_stop = mtu3_gadget_stop, + .udc_set_speed = mtu3_gadget_set_speed, +}; + +static void mtu3_state_reset(struct mtu3 *mtu) +{ + mtu->address = 0; + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + mtu->may_wakeup = 0; + mtu->u1_enable = 0; + mtu->u2_enable = 0; + mtu->delayed_status = false; + mtu->test_mode = false; +} + +static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep, + u32 epnum, u32 is_in) +{ + mep->epnum = epnum; + mep->mtu = mtu; + mep->is_in = is_in; + + INIT_LIST_HEAD(&mep->req_list); + + sprintf(mep->name, "ep%d%s", epnum, + !epnum ? "" : (is_in ? 
"in" : "out")); + + mep->ep.name = mep->name; + INIT_LIST_HEAD(&mep->ep.ep_list); + + /* initialize maxpacket as SS */ + if (!epnum) { + usb_ep_set_maxpacket_limit(&mep->ep, USB_HS_MAXP); + mep->ep.ops = &mtu3_ep0_ops; + mtu->g.ep0 = &mep->ep; + } else { + usb_ep_set_maxpacket_limit(&mep->ep, USB_SS_MAXP); + mep->ep.ops = &mtu3_ep_ops; + list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list); + } + + dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name, + mep->ep.maxpacket); +} + +static void mtu3_gadget_init_eps(struct mtu3 *mtu) +{ + u8 epnum; + + /* initialize endpoint list just once */ + INIT_LIST_HEAD(&mtu->g.ep_list); + + dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n", + __func__, mtu->num_eps); + + init_hw_ep(mtu, mtu->ep0, 0, 0); + for (epnum = 1; epnum < mtu->num_eps; epnum++) { + init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1); + init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0); + } +} + +int mtu3_gadget_setup(struct mtu3 *mtu) +{ + mtu->g.ops = &mtu3_gadget_ops; + mtu->g.max_speed = mtu->max_speed; + mtu->g.speed = USB_SPEED_UNKNOWN; + mtu->g.is_dualspeed = 1; + mtu->g.name = MTU3_DRIVER_NAME; + mtu->is_active = 0; + mtu->delayed_status = false; + + mtu3_gadget_init_eps(mtu); + + return usb_add_gadget_udc((struct device *)mtu->dev, &mtu->g); +} + +void mtu3_gadget_cleanup(struct mtu3 *mtu) +{ + usb_del_gadget_udc(&mtu->g); +} + +void mtu3_gadget_resume(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget RESUME\n"); + if (mtu->gadget_driver && mtu->gadget_driver->resume) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->resume(&mtu->g); + spin_lock(&mtu->lock); + } +} + +/* called when SOF packets stop for 3+ msec or enters U3 */ +void mtu3_gadget_suspend(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget SUSPEND\n"); + if (mtu->gadget_driver && mtu->gadget_driver->suspend) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->suspend(&mtu->g); + spin_lock(&mtu->lock); + } +} + +/* called when VBUS drops below session threshold, and in other cases */ +void mtu3_gadget_disconnect(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget DISCONNECT\n"); + if (mtu->gadget_driver && mtu->gadget_driver->disconnect) { + spin_unlock(&mtu->lock); + mtu->gadget_driver->disconnect(&mtu->g); + spin_lock(&mtu->lock); + } + + mtu3_state_reset(mtu); + usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED); +} + +void mtu3_gadget_reset(struct mtu3 *mtu) +{ + dev_dbg(mtu->dev, "gadget RESET\n"); + + /* report disconnect, if we didn't flush EP state */ + if (mtu->g.speed != USB_SPEED_UNKNOWN) + mtu3_gadget_disconnect(mtu); + else + mtu3_state_reset(mtu); +} diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c new file mode 100644 index 00000000000..4b0bc5f02d1 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling + * + * Copyright (c) 2016 MediaTek Inc. 
+ * + * Author: Chunfeng.Yun <chunfeng.yun@mediatek.com> + */ + +#include <linux/iopoll.h> +#include <linux/usb/composite.h> + +#include "mtu3.h" + +/* ep0 is always mtu3->in_eps[0] */ +#define next_ep0_request(mtu) next_request((mtu)->ep0) + +/* for high speed test mode; see USB 2.0 spec 7.1.20 */ +static const u8 mtu3_test_packet[53] = { + /* implicit SYNC then DATA0 to start */ + + /* JKJKJKJK x9 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* JJKKJJKK x8 */ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + /* JJJJKKKK x8 */ + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, + /* JJJJJJJKKKKKKK x8 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + /* JJJJJJJK x8 */ + 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, + /* JKKKKKKK x10, JK */ + 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e, + /* implicit CRC16 then EOP to end */ +}; + +static char *decode_ep0_state(struct mtu3 *mtu) +{ + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_SETUP: + return "SETUP"; + case MU3D_EP0_STATE_TX: + return "IN"; + case MU3D_EP0_STATE_RX: + return "OUT"; + case MU3D_EP0_STATE_TX_END: + return "TX-END"; + case MU3D_EP0_STATE_STALL: + return "STALL"; + default: + return "??"; + } +} + +static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req) +{ + mtu3_req_complete(mtu->ep0, req, 0); +} + +static int +forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) +__releases(mtu->lock) +__acquires(mtu->lock) +{ + int ret; + + if (!mtu->gadget_driver) + return -EOPNOTSUPP; + + spin_unlock(&mtu->lock); + ret = mtu->gadget_driver->setup(&mtu->g, setup); + spin_lock(&mtu->lock); + + dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret); + return ret; +} + +static inline void writel_rep(volatile void *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u32 *buf = buffer; + + do { + writel(*buf++, addr); + } while (--count); + } +} + +static inline void readl_rep(const volatile void *addr, void *buffer, + unsigned int count) +{ + if (count) { + u32 *buf = buffer; + + do { + u32 x = readl(addr); + *buf++ = x; + } while (--count); + } +} + +static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len) +{ + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; + u16 index = 0; + + dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n", + __func__, mep->epnum, len, src); + + if (len >= 4) { + writel_rep(fifo, src, len >> 2); + index = len & ~0x03; + } + if (len & 0x02) { + writew(*(u16 *)&src[index], fifo); + index += 2; + } + if (len & 0x01) + writeb(src[index], fifo); +} + +static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len) +{ + void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0; + u32 value; + u16 index = 0; + + dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n", + __func__, mep->epnum, len, dst); + + if (len >= 4) { + readl_rep(fifo, dst, len >> 2); + index = len & ~0x03; + } + if (len & 0x3) { + value = readl(fifo); + memcpy(&dst[index], &value, len & 0x3); + } +} + +static void ep0_load_test_packet(struct mtu3 *mtu) +{ + /* + * because the length of test packet is less than max packet of HS ep0, + * write it into fifo directly. + */ + ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet)); +} + +/* + * A. send STALL for setup transfer without data stage: + * set SENDSTALL and SETUPPKTRDY at the same time; + * B. send STALL for other cases: + * set SENDSTALL only. 
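+ *
+ * (Editor's note: case A corresponds to ep0_handle_setup() below, which
+ * passes EP0_SETUPPKTRDY only when the SETUP has no data stage, so the
+ * stall is armed by the same write that acknowledges the SETUP packet.)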
+ */ +static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy) +{ + struct mtu3 *mtu = mep0->mtu; + void __iomem *mbase = mtu->mac_base; + u32 csr; + + /* EP0_SENTSTALL is W1C */ + csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; + if (set) + csr |= EP0_SENDSTALL | pktrdy; + else + csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr); + + mtu->delayed_status = false; + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + + dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n", + set ? "SEND" : "CLEAR", decode_ep0_state(mtu)); +} + +static void ep0_do_status_stage(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 value; + + value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS; + mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND); +} + +static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq); + +static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req) +{} + +static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct mtu3_request *mreq; + struct mtu3 *mtu; + struct usb_set_sel_req sel; + + memcpy(&sel, req->buf, sizeof(sel)); + + mreq = to_mtu3_request(req); + mtu = mreq->mtu; + dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n", + sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel); +} + +/* queue data stage to handle 6 byte SET_SEL request */ +static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + int ret; + u16 length = le16_to_cpu(setup->wLength); + + if (unlikely(length != 6)) { + dev_err(mtu->dev, "%s wrong wLength:%d\n", + __func__, length); + return -EINVAL; + } + + mtu->ep0_req.mep = mtu->ep0; + mtu->ep0_req.request.length = 6; + mtu->ep0_req.request.buf = mtu->setup_buf; + mtu->ep0_req.request.complete = ep0_set_sel_complete; + ret = ep0_queue(mtu->ep0, &mtu->ep0_req); + + return ret < 0 ? ret : 1; +} + +static int +ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) +{ + struct mtu3_ep *mep = NULL; + int handled = 1; + u8 result[2] = {0, 0}; + u8 epnum = 0; + int is_in; + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED; + result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; + + if (mtu->g.speed >= USB_SPEED_SUPER) { + result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED; + result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED; + } + + dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__, + result[0], mtu->u1_enable, mtu->u2_enable); + + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT: + epnum = (u8)le16_to_cpu(setup->wIndex); + is_in = epnum & USB_DIR_IN; + epnum &= USB_ENDPOINT_NUMBER_MASK; + + if (epnum >= mtu->num_eps) { + handled = -EINVAL; + break; + } + if (!epnum) + break; + + mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum; + if (!mep->desc) { + handled = -EINVAL; + break; + } + if (mep->flags & MTU3_EP_STALL) + result[0] |= 1 << USB_ENDPOINT_HALT; + + break; + default: + /* class, vendor, etc ... 
delegate */ + handled = 0; + break; + } + + if (handled > 0) { + int ret; + + /* prepare a data stage for GET_STATUS */ + dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result); + memcpy(mtu->setup_buf, result, sizeof(result)); + mtu->ep0_req.mep = mtu->ep0; + mtu->ep0_req.request.length = 2; + mtu->ep0_req.request.buf = &mtu->setup_buf; + mtu->ep0_req.request.complete = ep0_dummy_complete; + ret = ep0_queue(mtu->ep0, &mtu->ep0_req); + if (ret < 0) + handled = ret; + } + return handled; +} + +static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + void __iomem *mbase = mtu->mac_base; + int handled = 1; + u32 value = 0; + + switch (le16_to_cpu(setup->wIndex) >> 8) { + case TEST_J: + dev_dbg(mtu->dev, "TEST_J\n"); + mtu->test_mode_nr = TEST_J_MODE; + break; + case TEST_K: + dev_dbg(mtu->dev, "TEST_K\n"); + mtu->test_mode_nr = TEST_K_MODE; + break; + case TEST_SE0_NAK: + dev_dbg(mtu->dev, "TEST_SE0_NAK\n"); + mtu->test_mode_nr = TEST_SE0_NAK_MODE; + break; + case TEST_PACKET: + dev_dbg(mtu->dev, "TEST_PACKET\n"); + mtu->test_mode_nr = TEST_PACKET_MODE; + break; + default: + handled = -EINVAL; + goto out; + } + + mtu->test_mode = true; + + /* no TX completion interrupt, and need restart platform after test */ + if (mtu->test_mode_nr == TEST_PACKET_MODE) + ep0_load_test_packet(mtu); + + /* send status before entering test mode. */ + ep0_do_status_stage(mtu); + + /* wait for ACK status sent by host */ + readl_poll_timeout(mbase + U3D_EP0CSR, value, + !(value & EP0_DATAEND), 5000); + + mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr); + + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + +out: + return handled; +} + +static int ep0_handle_feature_dev(struct mtu3 *mtu, + struct usb_ctrlrequest *setup, bool set) +{ + void __iomem *mbase = mtu->mac_base; + int handled = -EINVAL; + u32 lpc; + + switch (le16_to_cpu(setup->wValue)) { + case USB_DEVICE_REMOTE_WAKEUP: + mtu->may_wakeup = !!set; + handled = 1; + break; + case USB_DEVICE_TEST_MODE: + if (!set || (mtu->g.speed != USB_SPEED_HIGH) || + (le16_to_cpu(setup->wIndex) & 0xff)) + break; + + handled = handle_test_mode(mtu, setup); + break; + case USB_DEVICE_U1_ENABLE: + if (mtu->g.speed < USB_SPEED_SUPER || + mtu->g.state != USB_STATE_CONFIGURED) + break; + + lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); + if (set) + lpc |= SW_U1_REQUEST_ENABLE; + else + lpc &= ~SW_U1_REQUEST_ENABLE; + mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); + + mtu->u1_enable = !!set; + handled = 1; + break; + case USB_DEVICE_U2_ENABLE: + if (mtu->g.speed < USB_SPEED_SUPER || + mtu->g.state != USB_STATE_CONFIGURED) + break; + + lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL); + if (set) + lpc |= SW_U2_REQUEST_ENABLE; + else + lpc &= ~SW_U2_REQUEST_ENABLE; + mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc); + + mtu->u2_enable = !!set; + handled = 1; + break; + default: + handled = -EINVAL; + break; + } + return handled; +} + +static int ep0_handle_feature(struct mtu3 *mtu, + struct usb_ctrlrequest *setup, bool set) +{ + struct mtu3_ep *mep; + int handled = -EINVAL; + int is_in; + u16 value; + u16 index; + u8 epnum; + + value = le16_to_cpu(setup->wValue); + index = le16_to_cpu(setup->wIndex); + + switch (setup->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_DEVICE: + handled = ep0_handle_feature_dev(mtu, setup, set); + break; + case USB_RECIP_INTERFACE: + /* superspeed only */ + if (value == USB_INTRF_FUNC_SUSPEND && + mtu->g.speed >= USB_SPEED_SUPER) { + /* + * forward the request because function drivers + * should handle it + */ + handled = 0; 
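+			/*
+			 * Editor's note: for FUNCTION_SUSPEND the suspend
+			 * options travel in the upper byte of wIndex
+			 * (USB 3.x Set Feature); returning 0 here hands the
+			 * whole request to the composite/gadget layer
+			 * instead of acking it in the UDC.
+			 */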
+		}
+		break;
+	case USB_RECIP_ENDPOINT:
+		epnum = index & USB_ENDPOINT_NUMBER_MASK;
+		if (epnum == 0 || epnum >= mtu->num_eps ||
+		    value != USB_ENDPOINT_HALT)
+			break;
+
+		is_in = index & USB_DIR_IN;
+		mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+		if (!mep->desc)
+			break;
+
+		handled = 1;
+		/* ignore request if endpoint is wedged */
+		if (mep->flags & MTU3_EP_WEDGE)
+			break;
+
+		mtu3_ep_stall_set(mep, set);
+		break;
+	default:
+		/* class, vendor, etc ... delegate */
+		handled = 0;
+		break;
+	}
+	return handled;
+}
+
+/*
+ * handle all the control requests that can be handled here;
+ * returns:
+ *	negative errno - error happened
+ *	zero - need to delegate SETUP to the gadget driver
+ *	positive - already handled
+ */
+static int handle_standard_request(struct mtu3 *mtu,
+				   struct usb_ctrlrequest *setup)
+{
+	void __iomem *mbase = mtu->mac_base;
+	enum usb_device_state state = mtu->g.state;
+	int handled = -EINVAL;
+	u32 dev_conf;
+	u16 value;
+
+	value = le16_to_cpu(setup->wValue);
+
+	/* the gadget driver handles everything except what we must handle */
+	switch (setup->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		/* change it after the status stage */
+		mtu->address = (u8)(value & 0x7f);
+		dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);
+
+		dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
+		dev_conf &= ~DEV_ADDR_MSK;
+		dev_conf |= DEV_ADDR(mtu->address);
+		mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);
+
+		if (mtu->address)
+			usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
+		else
+			usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);
+
+		handled = 1;
+		break;
+	case USB_REQ_SET_CONFIGURATION:
+		if (state == USB_STATE_ADDRESS) {
+			usb_gadget_set_state(&mtu->g,
+					     USB_STATE_CONFIGURED);
+		} else if (state == USB_STATE_CONFIGURED) {
+			/*
+			 * USB2 spec sec 9.4.7, if wValue is 0 then dev
+			 * is moved to addressed state
+			 */
+			if (!value)
+				usb_gadget_set_state(&mtu->g,
+						     USB_STATE_ADDRESS);
+		}
+		handled = 0;
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		handled = ep0_handle_feature(mtu, setup, 0);
+		break;
+	case USB_REQ_SET_FEATURE:
+		handled = ep0_handle_feature(mtu, setup, 1);
+		break;
+	case USB_REQ_GET_STATUS:
+		handled = ep0_get_status(mtu, setup);
+		break;
+	case USB_REQ_SET_SEL:
+		handled = ep0_set_sel(mtu, setup);
+		break;
+	case USB_REQ_SET_ISOCH_DELAY:
+		handled = 1;
+		break;
+	default:
+		/* delegate SET_CONFIGURATION, etc */
+		handled = 0;
+	}
+
+	return handled;
+}
+
+/* receive a data packet (OUT) */
+static void ep0_rx_state(struct mtu3 *mtu)
+{
+	struct mtu3_request *mreq;
+	struct usb_request *req;
+	void __iomem *mbase = mtu->mac_base;
+	u32 maxp;
+	u32 csr;
+	u16 count = 0;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+	mreq = next_ep0_request(mtu);
+	req = &mreq->request;
+
+	/* read packet and ack; or stall because of gadget driver bug */
+	if (req) {
+		void *buf = req->buf + req->actual;
+		unsigned int len = req->length - req->actual;
+
+		/* read the buffer */
+		count = mtu3_readl(mbase, U3D_RXCOUNT0);
+		if (count > len) {
+			req->status = -EOVERFLOW;
+			count = len;
+		}
+		ep0_read_fifo(mtu->ep0, buf, count);
+		req->actual += count;
+		csr |= EP0_RXPKTRDY;
+
+		maxp = mtu->g.ep0->maxpacket;
+		if (count < maxp || req->actual == req->length) {
+			mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+			dev_dbg(mtu->dev, "ep0 state: %s\n",
+				decode_ep0_state(mtu));
+
+			csr |= EP0_DATAEND;
+		} else {
+			req = NULL;
+		}
+	} else {
+		csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
+		dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
+	}
+
+	mtu3_writel(mbase, U3D_EP0CSR,
csr); + + /* give back the request if have received all data */ + if (req) + ep0_req_giveback(mtu, req); +} + +/* transmitting to the host (IN) */ +static void ep0_tx_state(struct mtu3 *mtu) +{ + struct mtu3_request *mreq = next_ep0_request(mtu); + struct usb_request *req; + u32 csr; + u8 *src; + u32 count; + u32 maxp; + + dev_dbg(mtu->dev, "%s\n", __func__); + + if (!mreq) + return; + + maxp = mtu->g.ep0->maxpacket; + req = &mreq->request; + + /* load the data */ + src = (u8 *)req->buf + req->actual; + count = min(maxp, req->length - req->actual); + if (count) + ep0_write_fifo(mtu->ep0, src, count); + + dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n", + __func__, req->actual, req->length, count, maxp, req->zero); + + req->actual += count; + + if ((count < maxp) || + ((req->actual == req->length) && !req->zero)) + mtu->ep0_state = MU3D_EP0_STATE_TX_END; + + /* send it out, triggering a "txpktrdy cleared" irq */ + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; + mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY); + + dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__, + mtu3_readl(mtu->mac_base, U3D_EP0CSR)); +} + +static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup) +{ + struct mtu3_request *mreq; + u32 count; + u32 csr; + + csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS; + count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0); + + ep0_read_fifo(mtu->ep0, (u8 *)setup, count); + + dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n", + setup->bRequestType, setup->bRequest, + le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex), + le16_to_cpu(setup->wLength)); + + /* clean up any leftover transfers */ + mreq = next_ep0_request(mtu); + if (mreq) + ep0_req_giveback(mtu, &mreq->request); + + if (le16_to_cpu(setup->wLength) == 0) { + ; /* no data stage, nothing to do */ + } else if (setup->bRequestType & USB_DIR_IN) { + mtu3_writel(mtu->mac_base, U3D_EP0CSR, + csr | EP0_SETUPPKTRDY | EP0_DPHTX); + mtu->ep0_state = MU3D_EP0_STATE_TX; + } else { + mtu3_writel(mtu->mac_base, U3D_EP0CSR, + (csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX)); + mtu->ep0_state = MU3D_EP0_STATE_RX; + } +} + +static int ep0_handle_setup(struct mtu3 *mtu) +__releases(mtu->lock) +__acquires(mtu->lock) +{ + struct usb_ctrlrequest setup; + struct mtu3_request *mreq; + int handled = 0; + + ep0_read_setup(mtu, &setup); + + if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + handled = handle_standard_request(mtu, &setup); + + dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n", + handled, decode_ep0_state(mtu)); + + if (handled < 0) + goto stall; + else if (handled > 0) + goto finish; + + handled = forward_to_driver(mtu, &setup); + if (handled < 0) { +stall: + dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled); + + ep0_stall_set(mtu->ep0, true, + le16_to_cpu(setup.wLength) ? 
0 : EP0_SETUPPKTRDY); + + return 0; + } + +finish: + if (mtu->test_mode) { + ; /* nothing to do */ + } else if (handled == USB_GADGET_DELAYED_STATUS) { + + mreq = next_ep0_request(mtu); + if (mreq) { + /* already asked us to continue delayed status */ + ep0_do_status_stage(mtu); + ep0_req_giveback(mtu, &mreq->request); + } else { + /* do delayed STATUS stage till receive ep0_queue */ + mtu->delayed_status = true; + } + } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */ + + ep0_do_status_stage(mtu); + /* complete zlp request directly */ + mreq = next_ep0_request(mtu); + if (mreq && !mreq->request.length) + ep0_req_giveback(mtu, &mreq->request); + } + + return 0; +} + +irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + struct mtu3_request *mreq; + u32 int_status; + irqreturn_t ret = IRQ_NONE; + u32 csr; + u32 len; + + int_status = mtu3_readl(mbase, U3D_EPISR); + int_status &= mtu3_readl(mbase, U3D_EPIER); + mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */ + + /* only handle ep0's */ + if (!(int_status & (EP0ISR | SETUPENDISR))) + return IRQ_NONE; + + /* abort current SETUP, and process new one */ + if (int_status & SETUPENDISR) + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + + csr = mtu3_readl(mbase, U3D_EP0CSR); + + dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr); + + /* we sent a stall.. need to clear it now.. */ + if (csr & EP0_SENTSTALL) { + ep0_stall_set(mtu->ep0, false, 0); + csr = mtu3_readl(mbase, U3D_EP0CSR); + ret = IRQ_HANDLED; + } + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); + + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_TX: + /* irq on clearing txpktrdy */ + if ((csr & EP0_FIFOFULL) == 0) { + ep0_tx_state(mtu); + ret = IRQ_HANDLED; + } + break; + case MU3D_EP0_STATE_RX: + /* irq on set rxpktrdy */ + if (csr & EP0_RXPKTRDY) { + ep0_rx_state(mtu); + ret = IRQ_HANDLED; + } + break; + case MU3D_EP0_STATE_TX_END: + mtu3_writel(mbase, U3D_EP0CSR, + (csr & EP0_W1C_BITS) | EP0_DATAEND); + + mreq = next_ep0_request(mtu); + if (mreq) + ep0_req_giveback(mtu, &mreq->request); + + mtu->ep0_state = MU3D_EP0_STATE_SETUP; + ret = IRQ_HANDLED; + dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu)); + break; + case MU3D_EP0_STATE_SETUP: + if (!(csr & EP0_SETUPPKTRDY)) + break; + + len = mtu3_readl(mbase, U3D_RXCOUNT0); + if (len != 8) { + dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len); + break; + } + + ep0_handle_setup(mtu); + ret = IRQ_HANDLED; + break; + default: + /* can't happen */ + ep0_stall_set(mtu->ep0, true, 0); + WARN_ON(1); + break; + } + + return ret; +} + +static int mtu3_ep0_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + /* always enabled */ + return -EINVAL; +} + +static int mtu3_ep0_disable(struct usb_ep *ep) +{ + /* always enabled */ + return -EINVAL; +} + +static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct mtu3 *mtu = mep->mtu; + + mreq->mtu = mtu; + mreq->request.actual = 0; + mreq->request.status = -EINPROGRESS; + + dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__, + mep->name, decode_ep0_state(mtu), mreq->request.length); + + switch (mtu->ep0_state) { + case MU3D_EP0_STATE_SETUP: + case MU3D_EP0_STATE_RX: /* control-OUT data */ + case MU3D_EP0_STATE_TX: /* control-IN data */ + break; + default: + dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__, + decode_ep0_state(mtu)); + return -EINVAL; + } + + if (mtu->delayed_status) { + mtu->delayed_status = false; + ep0_do_status_stage(mtu); + /* needn't giveback the 
request for handling delay STATUS */ + return 0; + } + + if (!list_empty(&mep->req_list)) + return -EBUSY; + + list_add_tail(&mreq->list, &mep->req_list); + + /* sequence #1, IN ... start writing the data */ + if (mtu->ep0_state == MU3D_EP0_STATE_TX) + ep0_tx_state(mtu); + + return 0; +} + +static int mtu3_ep0_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp) +{ + struct mtu3_ep *mep; + struct mtu3_request *mreq; + struct mtu3 *mtu; + unsigned long flags; + int ret = 0; + + if (!ep || !req) + return -EINVAL; + + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + mreq = to_mtu3_request(req); + + spin_lock_irqsave(&mtu->lock, flags); + ret = ep0_queue(mep, mreq); + spin_unlock_irqrestore(&mtu->lock, flags); + return ret; +} + +static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + /* we just won't support this */ + return -EINVAL; +} + +static int mtu3_ep0_halt(struct usb_ep *ep, int value) +{ + struct mtu3_ep *mep; + struct mtu3 *mtu; + unsigned long flags; + int ret = 0; + + if (!ep || !value) + return -EINVAL; + + mep = to_mtu3_ep(ep); + mtu = mep->mtu; + + dev_dbg(mtu->dev, "%s\n", __func__); + + spin_lock_irqsave(&mtu->lock, flags); + + if (!list_empty(&mep->req_list)) { + ret = -EBUSY; + goto cleanup; + } + + switch (mtu->ep0_state) { + /* + * stalls are usually issued after parsing SETUP packet, either + * directly in irq context from setup() or else later. + */ + case MU3D_EP0_STATE_TX: + case MU3D_EP0_STATE_TX_END: + case MU3D_EP0_STATE_RX: + case MU3D_EP0_STATE_SETUP: + ep0_stall_set(mtu->ep0, true, 0); + break; + default: + dev_dbg(mtu->dev, "ep0 can't halt in state %s\n", + decode_ep0_state(mtu)); + ret = -EINVAL; + } + +cleanup: + spin_unlock_irqrestore(&mtu->lock, flags); + return ret; +} + +const struct usb_ep_ops mtu3_ep0_ops = { + .enable = mtu3_ep0_enable, + .disable = mtu3_ep0_disable, + .alloc_request = mtu3_alloc_request, + .free_request = mtu3_free_request, + .queue = mtu3_ep0_queue, + .dequeue = mtu3_ep0_dequeue, + .set_halt = mtu3_ep0_halt, +}; diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c new file mode 100644 index 00000000000..8001fc2d9bd --- /dev/null +++ b/drivers/usb/mtu3/mtu3_host.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mtu3_dr.c - dual role switch and host glue layer + * + * Copyright (C) 2016 MediaTek Inc. 
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#include <dm/lists.h>
+#include <linux/iopoll.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+static void host_ports_num_get(struct mtu3_host *u3h)
+{
+	u32 xhci_cap;
+
+	xhci_cap = mtu3_readl(u3h->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
+	u3h->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap);
+	u3h->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap);
+
+	dev_dbg(u3h->dev, "host - u2_ports:%d, u3_ports:%d\n",
+		u3h->u2_ports, u3h->u3_ports);
+}
+
+/* only configure the ports that will be used later */
+static int ssusb_host_enable(struct mtu3_host *u3h)
+{
+	void __iomem *ibase = u3h->ippc_base;
+	int num_u3p = u3h->u3_ports;
+	int num_u2p = u3h->u2_ports;
+	int u3_ports_disabled;
+	u32 check_clk;
+	u32 value;
+	int i;
+
+	/* power on host ip */
+	mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+	/* power on and enable u3 ports except skipped ones */
+	u3_ports_disabled = 0;
+	for (i = 0; i < num_u3p; i++) {
+		if ((0x1 << i) & u3h->u3p_dis_msk) {
+			u3_ports_disabled++;
+			continue;
+		}
+
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+		value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+		value |= SSUSB_U3_PORT_HOST_SEL;
+		mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+	}
+
+	/* power on and enable all u2 ports */
+	for (i = 0; i < num_u2p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+		value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+		value |= SSUSB_U2_PORT_HOST_SEL;
+		mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+	}
+
+	check_clk = SSUSB_XHCI_RST_B_STS;
+	if (num_u3p > u3_ports_disabled)
+		check_clk = SSUSB_U3_MAC_RST_B_STS;
+
+	return ssusb_check_clocks(u3h->ssusb, check_clk);
+}
+
+static void ssusb_host_disable(struct mtu3_host *u3h)
+{
+	void __iomem *ibase = u3h->ippc_base;
+	int num_u3p = u3h->u3_ports;
+	int num_u2p = u3h->u2_ports;
+	u32 value;
+	int i;
+
+	/* power down and disable u3 ports except skipped ones */
+	for (i = 0; i < num_u3p; i++) {
+		if ((0x1 << i) & u3h->u3p_dis_msk)
+			continue;
+
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+		value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+	}
+
+	/* power down and disable all u2 ports */
+	for (i = 0; i < num_u2p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+		value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+	}
+
+	/* power down host ip */
+	mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+}
+
+/*
+ * If the host supports multiple ports, the VBUSes (5V) of all ports except
+ * port0 (which supports OTG) had better be enabled by default in DTS:
+ * the host driver keeps the link with attached devices when the system
+ * enters suspend mode, so there is no need to control the VBUSes after
+ * initialization.
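+ *
+ * (Editor's note: the regulator_set_enable() call in ssusb_host_init()
+ * below deliberately tolerates -ENOSYS, so boards that do not model a
+ * dedicated vbus-supply regulator still initialize.)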
+ */ +int ssusb_host_init(struct ssusb_mtk *ssusb) +{ + struct mtu3_host *u3h = ssusb->u3h; + struct udevice *dev = u3h->dev; + int ret; + + u3h->ssusb = ssusb; + u3h->hcd = ssusb->mac_base; + u3h->ippc_base = ssusb->ippc_base; + + /* optional property, ignore the error */ + dev_read_u32(dev, "mediatek,u3p-dis-msk", &u3h->u3p_dis_msk); + + host_ports_num_get(u3h); + ret = ssusb_host_enable(u3h); + if (ret) + return ret; + + ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST); + + ret = regulator_set_enable(ssusb->vbus_supply, true); + if (ret < 0 && ret != -ENOSYS) { + dev_err(dev, "failed to enable vbus %d!\n", ret); + return ret; + } + + dev_info(dev, "%s done...\n", __func__); + + return 0; +} + +void ssusb_host_exit(struct ssusb_mtk *ssusb) +{ + regulator_set_enable(ssusb->vbus_supply, false); + ssusb_host_disable(ssusb->u3h); +} diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h new file mode 100644 index 00000000000..9c2a7e1f466 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_hw_regs.h @@ -0,0 +1,515 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#ifndef _SSUSB_HW_REGS_H_ +#define _SSUSB_HW_REGS_H_ + +/* segment offset of MAC register */ +#define SSUSB_XCHI_BASE 0x0000 +#define SSUSB_DEV_BASE 0x1000 +#define SSUSB_EPCTL_CSR_BASE 0x1800 +#define SSUSB_USB3_MAC_CSR_BASE 0x2400 +#define SSUSB_USB3_SYS_CSR_BASE 0x2400 +#define SSUSB_USB2_CSR_BASE 0x3400 + +/* IPPC register in Infra */ +#define SSUSB_SIFSLV_IPPC_BASE 0x0000 + +/* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */ + +#define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000) +#define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004) +#define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008) +#define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C) + +#define U3D_EPISR (SSUSB_DEV_BASE + 0x0080) +#define U3D_EPIER (SSUSB_DEV_BASE + 0x0084) +#define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088) +#define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C) + +#define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100) +#define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108) +#define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C) +#define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110) +#define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114) +#define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118) + +#define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210) +#define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214) +#define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218) + +#define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300) + +#define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400) +#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404) +#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408) +#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C) +#define U3D_QFCR (SSUSB_DEV_BASE + 0x0428) + +#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510) +#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514) +#define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518) + +#define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610) +#define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614) +#define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618) +#define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C) + +#define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700) +#define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704) +#define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708) +#define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C) +#define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710) +#define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714) +#define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718) +#define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C) + +#define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780) +#define 
U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784) +#define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788) +#define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C) +#define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0) +#define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4) +#define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8) +#define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC) +#define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0) +#define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4) +#define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8) +#define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC) + +#define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04) +#define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08) +#define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C) +#define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10) +#define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84) + +/*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/ + +/* U3D_LV1ISR */ +#define EP_CTRL_INTR BIT(5) +#define MAC2_INTR BIT(4) +#define DMA_INTR BIT(3) +#define MAC3_INTR BIT(2) +#define QMU_INTR BIT(1) +#define BMU_INTR BIT(0) + +/* U3D_LV1IECR */ +#define LV1IECR_MSK GENMASK(31, 0) + +/* U3D_EPISR */ +#define EPRISR(x) (BIT(16) << (x)) +#define SETUPENDISR BIT(16) +#define EPTISR(x) (BIT(0) << (x)) +#define EP0ISR BIT(0) + +/* U3D_EP0CSR */ +#define EP0_SENDSTALL BIT(25) +#define EP0_FIFOFULL BIT(23) +#define EP0_SENTSTALL BIT(22) +#define EP0_DPHTX BIT(20) +#define EP0_DATAEND BIT(19) +#define EP0_TXPKTRDY BIT(18) +#define EP0_SETUPPKTRDY BIT(17) +#define EP0_RXPKTRDY BIT(16) +#define EP0_MAXPKTSZ_MSK GENMASK(9, 0) +#define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK) +#define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL)) + +/* U3D_TX1CSR0 */ +#define TX_DMAREQEN BIT(29) +#define TX_FIFOFULL BIT(25) +#define TX_FIFOEMPTY BIT(24) +#define TX_SENTSTALL BIT(22) +#define TX_SENDSTALL BIT(21) +#define TX_TXPKTRDY BIT(16) +#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0) +#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK) +#define TX_W1C_BITS (~(TX_SENTSTALL)) + +/* U3D_TX1CSR1 */ +#define TX_MAX_PKT_G2(x) (((x) & 0xff) << 24) +#define TX_MULT_G2(x) (((x) & 0x7) << 21) +#define TX_MULT_OG(x) (((x) & 0x3) << 22) +#define TX_MAX_PKT_OG(x) (((x) & 0x3f) << 16) +#define TX_SLOT(x) (((x) & 0x3f) << 8) +#define TX_TYPE(x) (((x) & 0x3) << 4) +#define TX_SS_BURST(x) (((x) & 0xf) << 0) +#define TX_MULT(g2c, x) \ +({ \ + typeof(x) x_ = (x); \ + (g2c) ? TX_MULT_G2(x_) : TX_MULT_OG(x_); \ +}) +#define TX_MAX_PKT(g2c, x) \ +({ \ + typeof(x) x_ = (x); \ + (g2c) ? TX_MAX_PKT_G2(x_) : TX_MAX_PKT_OG(x_); \ +}) + +/* for TX_TYPE & RX_TYPE */ +#define TYPE_BULK (0x0) +#define TYPE_INT (0x1) +#define TYPE_ISO (0x2) +#define TYPE_MASK (0x3) + +/* U3D_TX1CSR2 */ +#define TX_BINTERVAL(x) (((x) & 0xff) << 24) +#define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) +#define TX_FIFOADDR(x) (((x) & 0x1fff) << 0) + +/* U3D_RX1CSR0 */ +#define RX_DMAREQEN BIT(29) +#define RX_SENTSTALL BIT(22) +#define RX_SENDSTALL BIT(21) +#define RX_RXPKTRDY BIT(16) +#define RX_RXMAXPKTSZ_MSK GENMASK(10, 0) +#define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK) +#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY)) + +/* U3D_RX1CSR1 */ +#define RX_MAX_PKT_G2(x) (((x) & 0xff) << 24) +#define RX_MULT_G2(x) (((x) & 0x7) << 21) +#define RX_MULT_OG(x) (((x) & 0x3) << 22) +#define RX_MAX_PKT_OG(x) (((x) & 0x3f) << 16) +#define RX_SLOT(x) (((x) & 0x3f) << 8) +#define RX_TYPE(x) (((x) & 0x3) << 4) +#define RX_SS_BURST(x) (((x) & 0xf) << 0) +#define RX_MULT(g2c, x) \ +({ \ + typeof(x) x_ = (x); \ + (g2c) ? 
RX_MULT_G2(x_) : RX_MULT_OG(x_); \ +}) +#define RX_MAX_PKT(g2c, x) \ +({ \ + typeof(x) x_ = (x); \ + (g2c) ? RX_MAX_PKT_G2(x_) : RX_MAX_PKT_OG(x_); \ +}) + +/* U3D_RX1CSR2 */ +#define RX_BINTERVAL(x) (((x) & 0xff) << 24) +#define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16) +#define RX_FIFOADDR(x) (((x) & 0x1fff) << 0) + +/* U3D_QCR0 */ +#define QMU_RX_CS_EN(x) (BIT(16) << (x)) +#define QMU_TX_CS_EN(x) (BIT(0) << (x)) +#define QMU_CS16B_EN BIT(0) + +/* U3D_QCR1 */ +#define QMU_TX_ZLP(x) (BIT(0) << (x)) + +/* U3D_QCR3 */ +#define QMU_RX_COZ(x) (BIT(16) << (x)) +#define QMU_RX_ZLP(x) (BIT(0) << (x)) + +/* U3D_TXQCSR1 */ +/* U3D_RXQCSR1 */ +#define QMU_Q_ACTIVE BIT(15) +#define QMU_Q_STOP BIT(2) +#define QMU_Q_RESUME BIT(1) +#define QMU_Q_START BIT(0) + +/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */ +#define QMU_RX_DONE_INT(x) (BIT(16) << (x)) +#define QMU_TX_DONE_INT(x) (BIT(0) << (x)) + +/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */ +#define RXQ_ZLPERR_INT BIT(20) +#define RXQ_LENERR_INT BIT(18) +#define RXQ_CSERR_INT BIT(17) +#define RXQ_EMPTY_INT BIT(16) +#define TXQ_LENERR_INT BIT(2) +#define TXQ_CSERR_INT BIT(1) +#define TXQ_EMPTY_INT BIT(0) + +/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */ +#define QMU_TX_LEN_ERR(x) (BIT(16) << (x)) +#define QMU_TX_CS_ERR(x) (BIT(0) << (x)) + +/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */ +#define QMU_RX_LEN_ERR(x) (BIT(16) << (x)) +#define QMU_RX_CS_ERR(x) (BIT(0) << (x)) + +/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */ +#define QMU_RX_ZLP_ERR(n) (BIT(16) << (n)) + +/* U3D_CAP_EPINFO */ +#define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f) +#define CAP_TX_EP_NUM(x) ((x) & 0x1f) + +/* U3D_MISC_CTRL */ +#define VBUS_ON BIT(1) +#define VBUS_FRC_EN BIT(0) + +/*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000) +#define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004) + +#define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050) +#define U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054) + +/*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/ + +/* U3D_DEVICE_CONF */ +#define DEV_ADDR_MSK GENMASK(30, 24) +#define DEV_ADDR(x) ((0x7f & (x)) << 24) +#define HW_USB2_3_SEL BIT(18) +#define SW_USB2_3_SEL_EN BIT(17) +#define SW_USB2_3_SEL BIT(16) +#define SSUSB_DEV_SPEED(x) ((x) & 0x7) + +/* U3D_EP_RST */ +#define EP1_IN_RST BIT(17) +#define EP1_OUT_RST BIT(1) +#define EP_RST(is_in, epnum) (((is_in) ? 
BIT(16) : BIT(0)) << (epnum)) +#define EP0_RST BIT(0) + +/* U3D_DEV_LINK_INTR_ENABLE */ +/* U3D_DEV_LINK_INTR */ +#define SSUSB_DEV_SPEED_CHG_INTR BIT(0) + +/*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010) +#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C) + +#define U3D_LINK_STATE_MACHINE (SSUSB_USB3_MAC_CSR_BASE + 0x0134) +#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C) +#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140) + +#define U3D_U3U2_SWITCH_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0170) + +/*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/ + +/* U3D_LTSSM_CTRL */ +#define FORCE_POLLING_FAIL BIT(4) +#define FORCE_RXDETECT_FAIL BIT(3) +#define SOFT_U3_EXIT_EN BIT(2) +#define COMPLIANCE_EN BIT(1) +#define U1_GO_U2_EN BIT(0) + +/* U3D_USB3_CONFIG */ +#define USB3_EN BIT(0) + +/* U3D_LINK_STATE_MACHINE */ +#define LTSSM_STATE(x) ((x) & 0x1f) + +/* U3D_LTSSM_INTR_ENABLE */ +/* U3D_LTSSM_INTR */ +#define U3_RESUME_INTR BIT(18) +#define U3_LFPS_TMOUT_INTR BIT(17) +#define VBUS_FALL_INTR BIT(16) +#define VBUS_RISE_INTR BIT(15) +#define RXDET_SUCCESS_INTR BIT(14) +#define EXIT_U3_INTR BIT(13) +#define EXIT_U2_INTR BIT(12) +#define EXIT_U1_INTR BIT(11) +#define ENTER_U3_INTR BIT(10) +#define ENTER_U2_INTR BIT(9) +#define ENTER_U1_INTR BIT(8) +#define ENTER_U0_INTR BIT(7) +#define RECOVERY_INTR BIT(6) +#define WARM_RST_INTR BIT(5) +#define HOT_RST_INTR BIT(4) +#define LOOPBACK_INTR BIT(3) +#define COMPLIANCE_INTR BIT(2) +#define SS_DISABLE_INTR BIT(1) +#define SS_INACTIVE_INTR BIT(0) + +/* U3D_U3U2_SWITCH_CTRL */ +#define SOFTCON_CLR_AUTO_EN BIT(0) + +/*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C) +#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210) +#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214) + +/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/ + +/* U3D_LINK_UX_INACT_TIMER */ +#define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16) +#define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16) +#define U2_INACT_TIMEOUT_MSK GENMASK(15, 8) +#define U1_INACT_TIMEOUT_MSK GENMASK(7, 0) +#define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff) + +/* U3D_LINK_POWER_CONTROL */ +#define SW_U2_ACCEPT_ENABLE BIT(9) +#define SW_U1_ACCEPT_ENABLE BIT(8) +#define UX_EXIT BIT(5) +#define LGO_U3 BIT(4) +#define LGO_U2 BIT(3) +#define LGO_U1 BIT(2) +#define SW_U2_REQUEST_ENABLE BIT(1) +#define SW_U1_REQUEST_ENABLE BIT(0) + +/* U3D_LINK_ERR_COUNT */ +#define CLR_LINK_ERR_CNT BIT(16) +#define LINK_ERROR_COUNT GENMASK(15, 0) + +/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/ + +#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004) +#define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C) +#define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014) +#define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018) +#define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C) +#define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024) +#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C) +#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044) +#define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C) +#define U3D_USB20_OPSTATE (SSUSB_USB2_CSR_BASE + 0x0060) + +/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/ + +/* U3D_POWER_MANAGEMENT */ +#define LPM_BESL_STALL BIT(14) +#define 
LPM_BESLD_STALL BIT(13) +#define LPM_RWP BIT(11) +#define LPM_HRWE BIT(10) +#define LPM_MODE(x) (((x) & 0x3) << 8) +#define ISO_UPDATE BIT(7) +#define SOFT_CONN BIT(6) +#define HS_ENABLE BIT(5) +#define RESUME BIT(2) +#define SUSPENDM_ENABLE BIT(0) + +/* U3D_DEVICE_CONTROL */ +#define DC_HOSTREQ BIT(1) +#define DC_SESSION BIT(0) + +/* U3D_USB2_TEST_MODE */ +#define U2U3_AUTO_SWITCH BIT(10) +#define LPM_FORCE_STALL BIT(8) +#define FIFO_ACCESS BIT(6) +#define FORCE_FS BIT(5) +#define FORCE_HS BIT(4) +#define TEST_PACKET_MODE BIT(3) +#define TEST_K_MODE BIT(2) +#define TEST_J_MODE BIT(1) +#define TEST_SE0_NAK_MODE BIT(0) + +/* U3D_COMMON_USB_INTR_ENABLE */ +/* U3D_COMMON_USB_INTR */ +#define LPM_RESUME_INTR BIT(9) +#define LPM_INTR BIT(8) +#define DISCONN_INTR BIT(5) +#define CONN_INTR BIT(4) +#define SOF_INTR BIT(3) +#define RESET_INTR BIT(2) +#define RESUME_INTR BIT(1) +#define SUSPEND_INTR BIT(0) + +/* U3D_LINK_RESET_INFO */ +#define WTCHRP_MSK GENMASK(19, 16) + +/* U3D_USB20_LPM_PARAMETER */ +#define LPM_BESLCK_U3(x) (((x) & 0xf) << 12) +#define LPM_BESLCK(x) (((x) & 0xf) << 8) +#define LPM_BESLDCK(x) (((x) & 0xf) << 4) +#define LPM_BESL GENMASK(3, 0) + +/* U3D_USB20_MISC_CONTROL */ +#define LPM_U3_ACK_EN BIT(0) + +/*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/ + +#define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000) +#define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004) +#define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008) +#define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C) +#define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010) +#define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014) +#define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018) +#define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C) +#define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024) +#define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028) +#define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C) +#define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030) +#define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050) +#define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C) +#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098) +#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0) +#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4) +#define U3D_SSUSB_IP_TRUNK_VERS (U3D_SSUSB_HW_SUB_ID) +#define U3D_SSUSB_PRB_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x00B0) +#define U3D_SSUSB_PRB_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x00B4) +#define U3D_SSUSB_PRB_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x00B8) +#define U3D_SSUSB_PRB_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x00BC) +#define U3D_SSUSB_PRB_CTRL4 (SSUSB_SIFSLV_IPPC_BASE + 0x00C0) +#define U3D_SSUSB_PRB_CTRL5 (SSUSB_SIFSLV_IPPC_BASE + 0x00C4) +#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8) + +/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/ + +/* U3D_SSUSB_IP_PW_CTRL0 */ +#define SSUSB_IP_SW_RST BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL1 */ +#define SSUSB_IP_HOST_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL2 */ +#define SSUSB_IP_DEV_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_CTRL3 */ +#define SSUSB_IP_PCIE_PDN BIT(0) + +/* U3D_SSUSB_IP_PW_STS1 */ +#define SSUSB_IP_SLEEP_STS BIT(30) +#define SSUSB_U3_MAC_RST_B_STS BIT(16) +#define SSUSB_XHCI_RST_B_STS BIT(11) +#define SSUSB_SYS125_RST_B_STS BIT(10) +#define SSUSB_REF_RST_B_STS BIT(8) +#define SSUSB_SYSPLL_STABLE BIT(0) + +/* U3D_SSUSB_IP_PW_STS2 */ +#define SSUSB_U2_MAC_SYS_RST_B_STS 
BIT(0) + +/* U3D_SSUSB_OTG_STS */ +#define SSUSB_VBUS_VALID BIT(9) + +/* U3D_SSUSB_OTG_STS_CLR */ +#define SSUSB_VBUS_INTR_CLR BIT(6) + +/* U3D_SSUSB_IP_XHCI_CAP */ +#define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff) +#define SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff) + +/* U3D_SSUSB_IP_DEV_CAP */ +#define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff) + +/* U3D_SSUSB_OTG_INT_EN */ +#define SSUSB_VBUS_CHG_INT_A_EN BIT(7) +#define SSUSB_VBUS_CHG_INT_B_EN BIT(6) + +/* U3D_SSUSB_U3_CTRL_0P */ +#define SSUSB_U3_PORT_SSP_SPEED BIT(9) +#define SSUSB_U3_PORT_DUAL_MODE BIT(7) +#define SSUSB_U3_PORT_HOST_SEL BIT(2) +#define SSUSB_U3_PORT_PDN BIT(1) +#define SSUSB_U3_PORT_DIS BIT(0) + +/* U3D_SSUSB_U2_CTRL_0P */ +#define SSUSB_U2_PORT_RG_IDDIG BIT(12) +#define SSUSB_U2_PORT_FORCE_IDDIG BIT(11) +#define SSUSB_U2_PORT_VBUSVALID BIT(9) +#define SSUSB_U2_PORT_OTG_SEL BIT(7) +#define SSUSB_U2_PORT_HOST BIT(2) +#define SSUSB_U2_PORT_PDN BIT(1) +#define SSUSB_U2_PORT_DIS BIT(0) +#define SSUSB_U2_PORT_HOST_SEL (SSUSB_U2_PORT_VBUSVALID | SSUSB_U2_PORT_HOST) + +/* U3D_SSUSB_DEV_RST_CTRL */ +#define SSUSB_DEV_SW_RST BIT(0) + +/* U3D_SSUSB_IP_TRUNK_VERS */ +#define IP_TRUNK_VERS(x) (((x) >> 16) & 0xffff) + +#endif /* _SSUSB_HW_REGS_H_ */ diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c new file mode 100644 index 00000000000..3795e695e87 --- /dev/null +++ b/drivers/usb/mtu3/mtu3_plat.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#include <common.h> +#include <dm/lists.h> +#include <linux/iopoll.h> + +#include "mtu3.h" +#include "mtu3_dr.h" + +void ssusb_set_force_mode(struct ssusb_mtk *ssusb, + enum mtu3_dr_force_mode mode) +{ + u32 value; + + value = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0)); + switch (mode) { + case MTU3_DR_FORCE_DEVICE: + value |= SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG; + break; + case MTU3_DR_FORCE_HOST: + value |= SSUSB_U2_PORT_FORCE_IDDIG; + value &= ~SSUSB_U2_PORT_RG_IDDIG; + break; + case MTU3_DR_FORCE_NONE: + value &= ~(SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG); + break; + default: + return; + } + mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value); +} + +/* u2-port0 should be powered on and enabled; */ +int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks) +{ + void __iomem *ibase = ssusb->ippc_base; + u32 value, check_val; + int ret; + + check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE | + SSUSB_REF_RST_B_STS; + + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value, + ((value & check_val) == check_val), 10000); + if (ret) { + dev_err(ssusb->dev, "clks of sts1 are not stable!\n"); + return ret; + } + + ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value, + (value & SSUSB_U2_MAC_SYS_RST_B_STS), 10000); + if (ret) { + dev_err(ssusb->dev, "mac2 clock is not stable\n"); + return ret; + } + + return 0; +} + +int ssusb_phy_setup(struct ssusb_mtk *ssusb) +{ + struct udevice *dev = ssusb->dev; + struct phy_bulk *phys = &ssusb->phys; + int ret; + + ret = generic_phy_get_bulk(dev, phys); + if (ret) + return ret; + + ret = generic_phy_init_bulk(phys); + if (ret) + return ret; + + ret = generic_phy_power_on_bulk(phys); + if (ret) + generic_phy_exit_bulk(phys); + + return ret; +} + +void ssusb_phy_shutdown(struct ssusb_mtk *ssusb) +{ + generic_phy_power_off_bulk(&ssusb->phys); + generic_phy_exit_bulk(&ssusb->phys); +} + +static int ssusb_rscs_init(struct ssusb_mtk *ssusb) +{ + int ret = 0; 
+ + ret = regulator_set_enable(ssusb->vusb33_supply, true); + if (ret < 0 && ret != -ENOSYS) { + dev_err(ssusb->dev, "failed to enable vusb33\n"); + goto vusb33_err; + } + + ret = clk_enable_bulk(&ssusb->clks); + if (ret) + goto clks_err; + + ret = ssusb_phy_setup(ssusb); + if (ret) { + dev_err(ssusb->dev, "failed to setup phy\n"); + goto phy_err; + } + + return 0; + +phy_err: + clk_disable_bulk(&ssusb->clks); +clks_err: + regulator_set_enable(ssusb->vusb33_supply, false); +vusb33_err: + return ret; +} + +static void ssusb_rscs_exit(struct ssusb_mtk *ssusb) +{ + clk_disable_bulk(&ssusb->clks); + regulator_set_enable(ssusb->vusb33_supply, false); + ssusb_phy_shutdown(ssusb); +} + +static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb) +{ + /* reset whole ip (xhci & u3d) */ + mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); + udelay(1); + mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST); +} + +static int get_ssusb_rscs(struct udevice *dev, struct ssusb_mtk *ssusb) +{ + struct udevice *child; + int ret; + + ret = device_get_supply_regulator(dev, "vusb33-supply", + &ssusb->vusb33_supply); + if (ret) /* optional, ignore error */ + dev_warn(dev, "can't get optional vusb33 %d\n", ret); + + ret = device_get_supply_regulator(dev, "vbus-supply", + &ssusb->vbus_supply); + if (ret) /* optional, ignore error */ + dev_warn(dev, "can't get optional vbus regulator %d!\n", ret); + + ret = clk_get_bulk(dev, &ssusb->clks); + if (ret) { + dev_err(dev, "failed to get clocks %d!\n", ret); + return ret; + } + + ssusb->ippc_base = devfdt_remap_addr_name(dev, "ippc"); + if (!ssusb->ippc_base) { + dev_err(dev, "error mapping memory for ippc\n"); + return -ENODEV; + } + + ret = device_find_first_child(dev, &child); + if (ret || !child) { + dev_err(dev, "failed to get child %d!\n", ret); + return ret; + } + + ssusb->mac_base = devfdt_remap_addr_name(child, "mac"); + if (!ssusb->mac_base) { + dev_err(dev, "error mapping memory for mac\n"); + return -ENODEV; + } + + ssusb->dr_mode = usb_get_dr_mode(child->node); + + if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN || + ssusb->dr_mode == USB_DR_MODE_OTG) + ssusb->dr_mode = USB_DR_MODE_PERIPHERAL; + + if (IS_ENABLED(CONFIG_USB_MTU3_GADGET)) + ssusb->dr_mode = USB_DR_MODE_PERIPHERAL; + else if (IS_ENABLED(CONFIG_USB_MTU3_HOST)) + ssusb->dr_mode = USB_DR_MODE_HOST; + + dev_info(dev, "dr_mode: %d, ippc: 0x%p, mac: 0x%p\n", + ssusb->dr_mode, ssusb->ippc_base, ssusb->mac_base); + + return 0; +} + +static int mtu3_probe(struct udevice *dev) +{ + struct ssusb_mtk *ssusb = dev_get_priv(dev); + int ret = -ENOMEM; + + ssusb->dev = dev; + + ret = get_ssusb_rscs(dev, ssusb); + if (ret) + return ret; + + ret = ssusb_rscs_init(ssusb); + if (ret) + return ret; + + ssusb_ip_sw_reset(ssusb); + + return 0; +} + +static int mtu3_remove(struct udevice *dev) +{ + struct ssusb_mtk *ssusb = dev_to_ssusb(dev); + + ssusb_rscs_exit(ssusb); + return 0; +} + +static const struct udevice_id ssusb_of_match[] = { + {.compatible = "mediatek,ssusb",}, + {}, +}; + +#if CONFIG_IS_ENABLED(DM_USB_GADGET) +int dm_usb_gadget_handle_interrupts(struct udevice *dev) +{ + struct mtu3 *mtu = dev_get_priv(dev); + + mtu3_irq(0, mtu); + + return 0; +} + +static int mtu3_gadget_probe(struct udevice *dev) +{ + struct ssusb_mtk *ssusb = dev_to_ssusb(dev->parent); + struct mtu3 *mtu = dev_get_priv(dev); + + mtu->dev = dev; + ssusb->u3d = mtu; + return ssusb_gadget_init(ssusb); +} + +static int mtu3_gadget_remove(struct udevice *dev) +{ + struct mtu3 *mtu = dev_get_priv(dev); + + 
ssusb_gadget_exit(mtu->ssusb); + return 0; +} + +U_BOOT_DRIVER(mtu3_peripheral) = { + .name = "mtu3-peripheral", + .id = UCLASS_USB_GADGET_GENERIC, + .of_match = ssusb_of_match, + .probe = mtu3_gadget_probe, + .remove = mtu3_gadget_remove, + .priv_auto_alloc_size = sizeof(struct mtu3), +}; +#endif + +#if defined(CONFIG_SPL_USB_HOST_SUPPORT) || \ + (!defined(CONFIG_SPL_BUILD) && defined(CONFIG_USB_HOST)) +static int mtu3_host_probe(struct udevice *dev) +{ + struct ssusb_mtk *ssusb = dev_to_ssusb(dev->parent); + struct mtu3_host *u3h = dev_get_priv(dev); + struct xhci_hcor *hcor; + int rc; + + u3h->dev = dev; + ssusb->u3h = u3h; + rc = ssusb_host_init(ssusb); + if (rc) + return rc; + + u3h->ctrl.quirks = XHCI_MTK_HOST; + hcor = (struct xhci_hcor *)((uintptr_t)u3h->hcd + + HC_LENGTH(xhci_readl(&u3h->hcd->cr_capbase))); + + return xhci_register(dev, u3h->hcd, hcor); +} + +static int mtu3_host_remove(struct udevice *dev) +{ + struct mtu3_host *u3h = dev_get_priv(dev); + + xhci_deregister(dev); + ssusb_host_exit(u3h->ssusb); + return 0; +} + +U_BOOT_DRIVER(mtu3_host) = { + .name = "mtu3-host", + .id = UCLASS_USB, + .of_match = ssusb_of_match, + .probe = mtu3_host_probe, + .remove = mtu3_host_remove, + .priv_auto_alloc_size = sizeof(struct mtu3_host), + .ops = &xhci_usb_ops, + .flags = DM_FLAG_ALLOC_PRIV_DMA, +}; +#endif + +static int mtu3_glue_bind(struct udevice *parent) +{ + struct udevice *dev; + enum usb_dr_mode dr_mode; + const char *driver; + const char *name; + ofnode node; + int ret; + + node = ofnode_by_compatible(parent->node, "mediatek,ssusb"); + if (!ofnode_valid(node)) + return -ENODEV; + + name = ofnode_get_name(node); + dr_mode = usb_get_dr_mode(node); + + switch (dr_mode) { +#if CONFIG_IS_ENABLED(DM_USB_GADGET) + case USB_DR_MODE_PERIPHERAL: + case USB_DR_MODE_OTG: + dev_dbg(parent, "%s: dr_mode: peripheral\n", __func__); + driver = "mtu3-peripheral"; + break; +#endif + +#if defined(CONFIG_SPL_USB_HOST_SUPPORT) || \ + (!defined(CONFIG_SPL_BUILD) && defined(CONFIG_USB_HOST)) + case USB_DR_MODE_HOST: + dev_dbg(parent, "%s: dr_mode: host\n", __func__); + driver = "mtu3-host"; + break; +#endif + default: + dev_err(parent, "%s: unsupported dr_mode %d\n", + __func__, dr_mode); + return -ENODEV; + }; + + dev_dbg(parent, "%s: node name: %s, driver %s, dr_mode %d\n", + __func__, name, driver, dr_mode); + + ret = device_bind_driver_to_node(parent, driver, name, node, &dev); + if (ret) + dev_err(parent, "%s: not able to bind usb device mode\n", + __func__); + + return ret; +} + +static const struct udevice_id mtu3_of_match[] = { + {.compatible = "mediatek,mtu3",}, + {}, +}; + +U_BOOT_DRIVER(mtu3) = { + .name = "mtu3", + .id = UCLASS_NOP, + .of_match = mtu3_of_match, + .bind = mtu3_glue_bind, + .probe = mtu3_probe, + .remove = mtu3_remove, + .priv_auto_alloc_size = sizeof(struct ssusb_mtk), +}; diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c new file mode 100644 index 00000000000..801c2bc416d --- /dev/null +++ b/drivers/usb/mtu3/mtu3_qmu.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mtu3_qmu.c - Queue Management Unit driver for device controller + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +/* + * Queue Management Unit (QMU) is designed to unload SW effort + * to serve DMA interrupts. + * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD), + * SW links data buffers and triggers QMU to send / receive data to + * host / from device at a time. 
+ * Currently only GPD is supported. + * + * For more detailed information, please refer to the QMU Programming Guide + */ + +#include <asm/cache.h> +#include <cpu_func.h> +#include <linux/iopoll.h> +#include <linux/types.h> + +#include "mtu3.h" + +#define QMU_CHECKSUM_LEN 16 + +#define GPD_FLAGS_HWO BIT(0) +#define GPD_FLAGS_BDP BIT(1) +#define GPD_FLAGS_BPS BIT(2) +#define GPD_FLAGS_IOC BIT(7) + +#define GPD_EXT_FLAG_ZLP BIT(5) + +#define DCACHELINE_SIZE CONFIG_SYS_CACHELINE_SIZE + +void mtu3_flush_cache(uintptr_t addr, u32 len) +{ + WARN_ON(!(void *)addr || len == 0); + + flush_dcache_range(addr & ~(DCACHELINE_SIZE - 1), + ALIGN(addr + len, DCACHELINE_SIZE)); +} + +void mtu3_inval_cache(uintptr_t addr, u32 len) +{ + WARN_ON(!(void *)addr || len == 0); + + invalidate_dcache_range(addr & ~(DCACHELINE_SIZE - 1), + ALIGN(addr + len, DCACHELINE_SIZE)); +} + +static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring, + dma_addr_t dma_addr) +{ + dma_addr_t dma_base = ring->dma; + struct qmu_gpd *gpd_head = ring->start; + u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head); + + if (offset >= MAX_GPD_NUM) + return NULL; + + return gpd_head + offset; +} + +static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, + struct qmu_gpd *gpd) +{ + dma_addr_t dma_base = ring->dma; + struct qmu_gpd *gpd_head = ring->start; + u32 offset; + + offset = gpd - gpd_head; + if (offset >= MAX_GPD_NUM) + return 0; + + return dma_base + (offset * sizeof(*gpd)); +} + +static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) +{ + ring->start = gpd; + ring->enqueue = gpd; + ring->dequeue = gpd; + ring->end = gpd + MAX_GPD_NUM - 1; +} + +static void reset_gpd_list(struct mtu3_ep *mep) +{ + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->start; + + if (gpd) { + gpd->flag &= ~GPD_FLAGS_HWO; + gpd_ring_init(ring, gpd); + mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); + } +} + +int mtu3_gpd_ring_alloc(struct mtu3_ep *mep) +{ + struct qmu_gpd *gpd; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + + /* software owns all GPDs by default */ + gpd = memalign(DCACHELINE_SIZE, QMU_GPD_RING_SIZE); + if (!gpd) + return -ENOMEM; + + memset(gpd, 0, QMU_GPD_RING_SIZE); + ring->dma = (dma_addr_t)gpd; + gpd_ring_init(ring, gpd); + + return 0; +} + +void mtu3_gpd_ring_free(struct mtu3_ep *mep) +{ + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + + kfree(ring->start); + memset(ring, 0, sizeof(*ring)); +} + +void mtu3_qmu_resume(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + u32 offset; + + offset = mep->is_in ?
USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); + + mtu3_writel(mbase, offset, QMU_Q_RESUME); + if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE)) + mtu3_writel(mbase, offset, QMU_Q_RESUME); +} + +static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring) +{ + if (ring->enqueue < ring->end) + ring->enqueue++; + else + ring->enqueue = ring->start; + + return ring->enqueue; +} + +static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) +{ + if (ring->dequeue < ring->end) + ring->dequeue++; + else + ring->dequeue = ring->start; + + return ring->dequeue; +} + +/* check if a ring is empty */ +static int gpd_ring_empty(struct mtu3_gpd_ring *ring) +{ + struct qmu_gpd *enq = ring->enqueue; + struct qmu_gpd *next; + + if (ring->enqueue < ring->end) + next = enq + 1; + else + next = ring->start; + + /* one gpd is reserved to simplify gpd preparation */ + return next == ring->dequeue; +} + +int mtu3_prepare_transfer(struct mtu3_ep *mep) +{ + return gpd_ring_empty(&mep->gpd_ring); +} + +static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct qmu_gpd *enq; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->enqueue; + struct usb_request *req = &mreq->request; + + /* set all fields to zero as default value */ + memset(gpd, 0, sizeof(*gpd)); + + gpd->buffer = cpu_to_le32((u32)req->dma); + gpd->buf_len = cpu_to_le16(req->length); + + /* get the next GPD */ + enq = advance_enq_gpd(ring); + dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n", + mep->epnum, gpd, enq); + + enq->flag &= ~GPD_FLAGS_HWO; + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); + + if (req->zero) + gpd->ext_flag |= GPD_EXT_FLAG_ZLP; + + gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; + + mreq->gpd = gpd; + + if (req->length) + mtu3_flush_cache((uintptr_t)req->buf, req->length); + + mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); + + return 0; +} + +static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + struct qmu_gpd *enq; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + struct qmu_gpd *gpd = ring->enqueue; + struct usb_request *req = &mreq->request; + + /* set all fields to zero as default value */ + memset(gpd, 0, sizeof(*gpd)); + + gpd->buffer = cpu_to_le32((u32)req->dma); + gpd->data_buf_len = cpu_to_le16(req->length); + + /* get the next GPD */ + enq = advance_enq_gpd(ring); + dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n", + mep->epnum, gpd, enq); + + enq->flag &= ~GPD_FLAGS_HWO; + gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); + gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; + + mreq->gpd = gpd; + + mtu3_inval_cache((uintptr_t)req->buf, req->length); + mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); + + return 0; +} + +void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) +{ + if (mep->is_in) + mtu3_prepare_tx_gpd(mep, mreq); + else + mtu3_prepare_rx_gpd(mep, mreq); +} + +int mtu3_qmu_start(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + u8 epnum = mep->epnum; + + if (mep->is_in) { + /* set QMU start address */ + mtu3_writel(mbase, USB_QMU_TQSAR(epnum), ring->dma); + mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN); + /* send zero length packet according to ZLP flag in GPD */ + mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum)); + mtu3_writel(mbase, U3D_TQERRIESR0, + QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum)); + + if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) { +
dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum); + return 0; + } + mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START); + + } else { + mtu3_writel(mbase, USB_QMU_RQSAR(epnum), ring->dma); + mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN); + /* don't expect ZLP */ + mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum)); + /* move to next GPD when receive ZLP */ + mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum)); + mtu3_writel(mbase, U3D_RQERRIESR0, + QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum)); + mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum)); + + if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) { + dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum); + return 0; + } + mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START); + } + + return 0; +} + +/* may called in atomic context */ +void mtu3_qmu_stop(struct mtu3_ep *mep) +{ + struct mtu3 *mtu = mep->mtu; + void __iomem *mbase = mtu->mac_base; + int epnum = mep->epnum; + u32 value = 0; + u32 qcsr; + int ret; + + qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); + + if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) { + dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name); + return; + } + mtu3_writel(mbase, qcsr, QMU_Q_STOP); + + ret = readl_poll_timeout(mbase + qcsr, value, + !(value & QMU_Q_ACTIVE), 1000); + if (ret) { + dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name); + return; + } + + dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name); +} + +void mtu3_qmu_flush(struct mtu3_ep *mep) +{ + dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__, + ((mep->is_in) ? "TX" : "RX")); + + /*Stop QMU */ + mtu3_qmu_stop(mep); + reset_gpd_list(mep); +} + +/* + * NOTE: request list maybe is already empty as following case: + * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)--> + * queue_tx --> process_tasklet(meanwhile, the second one is transferred, + * tasklet process both of them)-->qmu_interrupt for second one. + * To avoid upper case, put qmu_done_tx in ISR directly to process it. 
+ */ +static void qmu_done_tx(struct mtu3 *mtu, u8 epnum) +{ + struct mtu3_ep *mep = mtu->in_eps + epnum; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + void __iomem *mbase = mtu->mac_base; + struct qmu_gpd *gpd = ring->dequeue; + struct qmu_gpd *gpd_current = NULL; + struct usb_request *req = NULL; + struct mtu3_request *mreq; + dma_addr_t cur_gpd_dma; + + /* translate the physical address read from the QMU register into a virtual address */ + cur_gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); + gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); + mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); + + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", + __func__, epnum, gpd, gpd_current, ring->enqueue); + + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { + mreq = next_request(mep); + + if (!mreq || mreq->gpd != gpd) { + dev_err(mtu->dev, "no correct TX req is found\n"); + break; + } + + req = &mreq->request; + req->actual = le16_to_cpu(gpd->buf_len); + mtu3_req_complete(mep, req, 0); + + gpd = advance_deq_gpd(ring); + mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); + } + + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", + __func__, epnum, ring->dequeue, ring->enqueue); +} + +static void qmu_done_rx(struct mtu3 *mtu, u8 epnum) +{ + struct mtu3_ep *mep = mtu->out_eps + epnum; + struct mtu3_gpd_ring *ring = &mep->gpd_ring; + void __iomem *mbase = mtu->mac_base; + struct qmu_gpd *gpd = ring->dequeue; + struct qmu_gpd *gpd_current = NULL; + struct usb_request *req = NULL; + struct mtu3_request *mreq; + dma_addr_t cur_gpd_dma; + + cur_gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum)); + gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); + mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); + + dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", + __func__, epnum, gpd, gpd_current, ring->enqueue); + + while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { + mreq = next_request(mep); + + if (!mreq || mreq->gpd != gpd) { + dev_err(mtu->dev, "no correct RX req is found\n"); + break; + } + req = &mreq->request; + + req->actual = le16_to_cpu(gpd->buf_len); + mtu3_req_complete(mep, req, 0); + + gpd = advance_deq_gpd(ring); + mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); + } + + dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", + __func__, epnum, ring->dequeue, ring->enqueue); +} + +static void qmu_done_isr(struct mtu3 *mtu, u32 done_status) +{ + int i; + + for (i = 1; i < mtu->num_eps; i++) { + if (done_status & QMU_RX_DONE_INT(i)) + qmu_done_rx(mtu, i); + if (done_status & QMU_TX_DONE_INT(i)) + qmu_done_tx(mtu, i); + } +} + +static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status) +{ + void __iomem *mbase = mtu->mac_base; + u32 errval; + int i; + + if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) { + errval = mtu3_readl(mbase, U3D_RQERRIR0); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_RX_CS_ERR(i)) + dev_err(mtu->dev, "Rx %d CS error!\n", i); + + if (errval & QMU_RX_LEN_ERR(i)) + dev_err(mtu->dev, "RX %d Length error\n", i); + } + mtu3_writel(mbase, U3D_RQERRIR0, errval); + } + + if (qmu_status & RXQ_ZLPERR_INT) { + errval = mtu3_readl(mbase, U3D_RQERRIR1); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_RX_ZLP_ERR(i)) + dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i); + } + mtu3_writel(mbase, U3D_RQERRIR1, errval); + } + + if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) { + errval = mtu3_readl(mbase, U3D_TQERRIR0); + for (i = 1; i < mtu->num_eps; i++) { + if (errval & QMU_TX_CS_ERR(i)) + dev_err(mtu->dev,
"Tx %d checksum error!\n", i); + + if (errval & QMU_TX_LEN_ERR(i)) + dev_err(mtu->dev, "Tx %d zlp error!\n", i); + } + mtu3_writel(mbase, U3D_TQERRIR0, errval); + } +} + +irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu) +{ + void __iomem *mbase = mtu->mac_base; + u32 qmu_status; + u32 qmu_done_status; + + /* U3D_QISAR1 is read update */ + qmu_status = mtu3_readl(mbase, U3D_QISAR1); + qmu_status &= mtu3_readl(mbase, U3D_QIER1); + + qmu_done_status = mtu3_readl(mbase, U3D_QISAR0); + qmu_done_status &= mtu3_readl(mbase, U3D_QIER0); + mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */ + dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n", + (qmu_done_status & 0xFFFF), qmu_done_status >> 16, + qmu_status); + + if (qmu_done_status) + qmu_done_isr(mtu, qmu_done_status); + + if (qmu_status) + qmu_exception_isr(mtu, qmu_status); + + return IRQ_HANDLED; +} + +void mtu3_qmu_init(struct mtu3 *mtu) +{ + compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B"); +} + +void mtu3_qmu_exit(struct mtu3 *mtu) +{ +} diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h new file mode 100644 index 00000000000..ba8a3aa309b --- /dev/null +++ b/drivers/usb/mtu3/mtu3_qmu.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mtu3_qmu.h - Queue Management Unit driver header + * + * Copyright (C) 2016 MediaTek Inc. + * + * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> + */ + +#ifndef __MTK_QMU_H__ +#define __MTK_QMU_H__ + +#define MAX_GPD_NUM 16 +#define QMU_GPD_SIZE (sizeof(struct qmu_gpd)) +#define QMU_GPD_RING_SIZE (MAX_GPD_NUM * QMU_GPD_SIZE) + +#define GPD_BUF_SIZE 65532 + +void mtu3_flush_cache(uintptr_t addr, u32 len); +void mtu3_inval_cache(uintptr_t addr, u32 len); + +void mtu3_qmu_stop(struct mtu3_ep *mep); +int mtu3_qmu_start(struct mtu3_ep *mep); +void mtu3_qmu_resume(struct mtu3_ep *mep); +void mtu3_qmu_flush(struct mtu3_ep *mep); + +void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq); +int mtu3_prepare_transfer(struct mtu3_ep *mep); + +int mtu3_gpd_ring_alloc(struct mtu3_ep *mep); +void mtu3_gpd_ring_free(struct mtu3_ep *mep); + +irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu); +void mtu3_qmu_init(struct mtu3 *mtu); +void mtu3_qmu_exit(struct mtu3 *mtu); + +#endif diff --git a/drivers/usb/musb-new/mt85xx.c b/drivers/usb/musb-new/mt85xx.c index c281c38a288..8f0561eeba3 100644 --- a/drivers/usb/musb-new/mt85xx.c +++ b/drivers/usb/musb-new/mt85xx.c @@ -12,6 +12,7 @@ #include <common.h> #include <clk.h> #include <dm.h> +#include <dm/device_compat.h> #include <dm/lists.h> #include <dm/root.h> #include <linux/delay.h> @@ -244,17 +245,17 @@ static int mtk_musb_init(struct musb *musb) ret = clk_enable(&glue->usbpllclk); if (ret) { - dev_err(dev, "failed to enable usbpll clock\n"); + dev_err(musb->controller, "failed to enable usbpll clock\n"); return ret; } ret = clk_enable(&glue->usbmcuclk); if (ret) { - dev_err(dev, "failed to enable usbmcu clock\n"); + dev_err(musb->controller, "failed to enable usbmcu clock\n"); return ret; } ret = clk_enable(&glue->usbclk); if (ret) { - dev_err(dev, "failed to enable usb clock\n"); + dev_err(musb->controller, "failed to enable usb clock\n"); return ret; } diff --git a/drivers/usb/musb-new/musb_core.c b/drivers/usb/musb-new/musb_core.c index 961de99795e..22811a5efb2 100644 --- a/drivers/usb/musb-new/musb_core.c +++ b/drivers/usb/musb-new/musb_core.c @@ -80,6 +80,8 @@ #include <linux/io.h> #else #include <common.h> +#include <dm.h> +#include <dm/device_compat.h> #include <usb.h> #include <linux/bitops.h> 
#include <linux/bug.h> diff --git a/drivers/usb/musb-new/musb_dsps.c b/drivers/usb/musb-new/musb_dsps.c index eb590885bc5..d55a920ae59 100644 --- a/drivers/usb/musb-new/musb_dsps.c +++ b/drivers/usb/musb-new/musb_dsps.c @@ -32,6 +32,8 @@ #include <plat/usb.h> #else #include <common.h> +#include <dm.h> +#include <dm/device_compat.h> #include <asm/omap_musb.h> #include "linux-compat.h" #endif @@ -338,7 +340,7 @@ static irqreturn_t dsps_interrupt(int irq, void *hci) * Also, DRVVBUS pulses for SRP (but not at 5V) ... */ if ((usbintr & MUSB_INTR_BABBLE) && is_host_enabled(musb)) - pr_info("CAUTION: musb: Babble Interrupt Occured\n"); + pr_info("CAUTION: musb: Babble Interrupt Occurred\n"); if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) { int drvvbus = dsps_readl(reg_base, wrp->status); diff --git a/drivers/usb/musb-new/musb_gadget.c b/drivers/usb/musb-new/musb_gadget.c index 8ba98d8c0e8..5b149dac6dd 100644 --- a/drivers/usb/musb-new/musb_gadget.c +++ b/drivers/usb/musb-new/musb_gadget.c @@ -23,6 +23,8 @@ #include <linux/slab.h> #else #include <common.h> +#include <dm.h> +#include <dm/device_compat.h> #include <linux/bug.h> #include <linux/usb/ch9.h> #include "linux-compat.h" #endif diff --git a/drivers/usb/musb-new/musb_gadget_ep0.c b/drivers/usb/musb-new/musb_gadget_ep0.c index 93f43ba4714..cbd92fca6b6 100644 --- a/drivers/usb/musb-new/musb_gadget_ep0.c +++ b/drivers/usb/musb-new/musb_gadget_ep0.c @@ -19,8 +19,10 @@ #include <linux/interrupt.h> #else #include <common.h> -#include "linux-compat.h" +#include <dm.h> +#include <dm/device_compat.h> #include <asm/processor.h> +#include "linux-compat.h" #endif #include "musb_core.h" diff --git a/drivers/usb/musb-new/musb_host.c b/drivers/usb/musb-new/musb_host.c index 5fa013659c5..acb2d40f3b5 100644 --- a/drivers/usb/musb-new/musb_host.c +++ b/drivers/usb/musb-new/musb_host.c @@ -22,6 +22,8 @@ #include <linux/dma-mapping.h> #else #include <common.h> +#include <dm.h> +#include <dm/device_compat.h> #include <usb.h> #include <linux/bug.h> #include "linux-compat.h" #endif diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index e2e1f9c476b..998271b9b62 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -665,7 +665,7 @@ config VIDEO config CFB_CONSOLE bool "Enable colour frame buffer console" - depends on VIDEO + depends on VIDEO || ARCH_OMAP2PLUS default y if VIDEO help Enables the colour frame buffer driver. This supports colour @@ -939,4 +939,44 @@ config SPLASH_SOURCE In case the environment variable "splashfile" is not defined the default name 'splash.bmp' will be used. +config VIDEO_BMP_GZIP + bool "Gzip compressed BMP image support" + depends on CMD_BMP || SPLASH_SCREEN + help + If this option is set, then in addition to standard BMP + images, gzipped BMP images can be displayed via the + splash screen support or the bmp command. + +config VIDEO_BMP_RLE8 + bool "Run length encoded BMP image (RLE8) support" + depends on DM_VIDEO || CFB_CONSOLE + help + If this option is set, 8-bit RLE compressed BMP images + are supported. + +config BMP_16BPP + bool "16-bit-per-pixel BMP image support" + depends on DM_VIDEO || LCD + help + Support display of BMP files with 16 bits per pixel. + +config BMP_24BPP + bool "24-bit-per-pixel BMP image support" + depends on DM_VIDEO || LCD + help + Support display of BMP files with 24 bits per pixel. + +config BMP_32BPP + bool "32-bit-per-pixel BMP image support" + depends on DM_VIDEO || LCD + help + Support display of BMP files with 32 bits per pixel.
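[Editor's note: the Kconfig entries above only gate what gets compiled in; a display path still branches on them at run time. A minimal, hypothetical sketch of such a branch, reusing U-Boot's existing gunzip() decompressor; the helper name and buffer handling are illustrative and not part of this patch.]

#include <common.h>
#include <gzip.h>

/*
 * Hypothetical helper (not from this patch): inflate a gzipped BMP into a
 * caller-supplied scratch buffer when CONFIG_VIDEO_BMP_GZIP is enabled.
 */
static int bmp_maybe_gunzip(void *dst, int dst_len,
			    unsigned char *src, unsigned long src_len)
{
	if (!IS_ENABLED(CONFIG_VIDEO_BMP_GZIP))
		return -ENOSYS;	/* caller falls back to the plain BMP path */

	/* gunzip() returns 0 on success; src_len is updated in place */
	return gunzip(dst, dst_len, src, &src_len);
}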
+ +config VIDEO_VCXK + bool "Enable VCXK video controller driver support" + default n + help + This enables VCXK driver which can be used with VC2K, VC4K + and VC8K devices on various boards from BuS Elektronik GmbH. + endmenu diff --git a/drivers/video/dw_mipi_dsi.c b/drivers/video/dw_mipi_dsi.c index 2743836fb4c..4055ef49b6e 100644 --- a/drivers/video/dw_mipi_dsi.c +++ b/drivers/video/dw_mipi_dsi.c @@ -485,15 +485,27 @@ static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, static void dw_mipi_dsi_init_pll(struct dw_mipi_dsi *dsi) { + const struct mipi_dsi_phy_ops *phy_ops = dsi->phy_ops; + unsigned int esc_rate; + u32 esc_clk_division; + /* * The maximum permitted escape clock is 20MHz and it is derived from - * lanebyteclk, which is running at "lane_mbps / 8". Thus we want: + * lanebyteclk, which is running at "lane_mbps / 8". + */ + if (phy_ops->get_esc_clk_rate) + phy_ops->get_esc_clk_rate(dsi->device, &esc_rate); + else + esc_rate = 20; /* Default to 20MHz */ + + /* + * We want: * - * (lane_mbps >> 3) / esc_clk_division < 20 + * (lane_mbps >> 3) / esc_clk_division < X * which is: - * (lane_mbps >> 3) / 20 > esc_clk_division + * (lane_mbps >> 3) / X > esc_clk_division */ - u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1; + esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1; dsi_write(dsi, DSI_PWR_UP, RESET); @@ -645,8 +657,13 @@ static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi, static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi) { + const struct mipi_dsi_phy_ops *phy_ops = dsi->phy_ops; + struct mipi_dsi_phy_timing timing = {0x40, 0x40, 0x40, 0x40}; u32 hw_version; + if (phy_ops->get_timing) + phy_ops->get_timing(dsi->device, dsi->lane_mbps, &timing); + /* * TODO dw drv improvements * data & clock lane timers should be computed according to panel @@ -658,16 +675,16 @@ static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi) hw_version = dsi_read(dsi, DSI_VERSION) & VERSION; if (hw_version >= HWVER_131) { - dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(0x40) | - PHY_LP2HS_TIME_V131(0x40)); + dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(timing.data_hs2lp) | + PHY_LP2HS_TIME_V131(timing.data_lp2hs)); dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000)); } else { - dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40) | - PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000)); + dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(timing.data_hs2lp) | + PHY_LP2HS_TIME(timing.data_lp2hs) | MAX_RD_TIME(10000)); } - dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40) - | PHY_CLKLP2HS_TIME(0x40)); + dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(timing.clk_hs2lp) + | PHY_CLKLP2HS_TIME(timing.clk_lp2hs)); } static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi) diff --git a/drivers/video/pwm_backlight.c b/drivers/video/pwm_backlight.c index 468a5703bdb..9519180ceb3 100644 --- a/drivers/video/pwm_backlight.c +++ b/drivers/video/pwm_backlight.c @@ -33,7 +33,7 @@ * @cur_level: Current level for the backlight (index or value) * @default_level: Default level for the backlight (index or value) * @min_level: Minimum level of the backlight (full off) - * @min_level: Maximum level of the backlight (full on) + * @max_level: Maximum level of the backlight (full on) * @enabled: true if backlight is enabled */ struct pwm_backlight_priv { @@ -63,7 +63,7 @@ static int set_pwm(struct pwm_backlight_priv *priv) int ret; duty_cycle = priv->period_ns * (priv->cur_level - priv->min_level) / - (priv->max_level - 
priv->min_level + 1); + (priv->max_level - priv->min_level); ret = pwm_set_config(priv->pwm, priv->channel, priv->period_ns, duty_cycle); if (ret) diff --git a/drivers/watchdog/designware_wdt.c b/drivers/watchdog/designware_wdt.c index 12f09a7a392..7caa6c550cd 100644 --- a/drivers/watchdog/designware_wdt.c +++ b/drivers/watchdog/designware_wdt.c @@ -130,7 +130,7 @@ static int designware_wdt_probe(struct udevice *dev) if (ret) return ret; - priv->clk_khz = clk_get_rate(&clk); + priv->clk_khz = clk_get_rate(&clk) / 1000; if (!priv->clk_khz) return -EINVAL; #else diff --git a/drivers/watchdog/octeontx_wdt.c b/drivers/watchdog/octeontx_wdt.c index 1e0670e0c5c..88708dc5e1c 100644 --- a/drivers/watchdog/octeontx_wdt.c +++ b/drivers/watchdog/octeontx_wdt.c @@ -5,25 +5,90 @@ * https://spdx.org/licenses */ +#include <clk.h> #include <dm.h> #include <errno.h> #include <wdt.h> #include <asm/io.h> +#include <linux/bitfield.h> DECLARE_GLOBAL_DATA_PTR; +#define CORE0_WDOG_OFFSET 0x40000 #define CORE0_POKE_OFFSET 0x50000 #define CORE0_POKE_OFFSET_MASK 0xfffffULL +#define WDOG_MODE GENMASK_ULL(1, 0) +#define WDOG_LEN GENMASK_ULL(19, 4) +#define WDOG_CNT GENMASK_ULL(43, 20) + struct octeontx_wdt { void __iomem *reg; + struct clk clk; }; +static int octeontx_wdt_start(struct udevice *dev, u64 timeout_ms, ulong flags) +{ + struct octeontx_wdt *priv = dev_get_priv(dev); + u64 clk_rate, val; + u64 tout_wdog; + + clk_rate = clk_get_rate(&priv->clk); + if (IS_ERR_VALUE(clk_rate)) + return -EINVAL; + + /* Watchdog counts in 1024 cycle steps */ + tout_wdog = (clk_rate * timeout_ms / 1000) >> 10; + + /* + * We can only specify the upper 16 bits of a 24 bit value. + * Round up + */ + tout_wdog = (tout_wdog + 0xff) >> 8; + + /* If the timeout overflows the hardware limit, set max */ + if (tout_wdog >= 0x10000) + tout_wdog = 0xffff; + + val = FIELD_PREP(WDOG_MODE, 0x3) | + FIELD_PREP(WDOG_LEN, tout_wdog) | + FIELD_PREP(WDOG_CNT, tout_wdog << 8); + writeq(val, priv->reg + CORE0_WDOG_OFFSET); + + return 0; +} + +static int octeontx_wdt_stop(struct udevice *dev) +{ + struct octeontx_wdt *priv = dev_get_priv(dev); + + writeq(0, priv->reg + CORE0_WDOG_OFFSET); + + return 0; +} + +static int octeontx_wdt_expire_now(struct udevice *dev, ulong flags) +{ + octeontx_wdt_stop(dev); + + /* Start with 100ms timeout to expire immediately */ + octeontx_wdt_start(dev, 100, flags); + + return 0; +} + static int octeontx_wdt_reset(struct udevice *dev) { struct octeontx_wdt *priv = dev_get_priv(dev); - writeq(~0ULL, priv->reg); + writeq(~0ULL, priv->reg + CORE0_POKE_OFFSET); + + return 0; +} + +static int octeontx_wdt_remove(struct udevice *dev) +{ + octeontx_wdt_stop(dev); return 0; } @@ -31,24 +96,35 @@ static int octeontx_wdt_reset(struct udevice *dev) static int octeontx_wdt_probe(struct udevice *dev) { struct octeontx_wdt *priv = dev_get_priv(dev); + int ret; priv->reg = dev_remap_addr(dev); if (!priv->reg) return -EINVAL; /* - * Save core poke register address in reg (its not 0xa0000 as - * extracted from the DT but 0x50000 instead) + * Save base register address in reg masking lower 20 bits + * as 0xa0000 appears when extracted from the DT */ priv->reg = (void __iomem *)(((u64)priv->reg & - ~CORE0_POKE_OFFSET_MASK) | - CORE0_POKE_OFFSET); + ~CORE0_POKE_OFFSET_MASK)); + + ret = clk_get_by_index(dev, 0, &priv->clk); + if (ret < 0) + return ret; + + ret = clk_enable(&priv->clk); + if (ret) + return ret; return 0; } static const struct wdt_ops octeontx_wdt_ops = { .reset = octeontx_wdt_reset, + .start = octeontx_wdt_start, + .stop = 
octeontx_wdt_stop, + .expire_now = octeontx_wdt_expire_now, }; static const struct udevice_id octeontx_wdt_ids[] = { @@ -63,4 +139,6 @@ U_BOOT_DRIVER(wdt_octeontx) = { .ops = &octeontx_wdt_ops, .priv_auto_alloc_size = sizeof(struct octeontx_wdt), .probe = octeontx_wdt_probe, + .remove = octeontx_wdt_remove, + .flags = DM_FLAG_OS_PREPARE, }; |
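[Editor's note: to make the timeout rounding in octeontx_wdt_start() above concrete, here is a small host-side sketch of the same arithmetic. It is pure computation with no hardware access; the 100 MHz clock rate in the example is an assumed value, not taken from this patch.]

#include <stdint.h>
#include <stdio.h>

/* Mirrors the timeout math in octeontx_wdt_start() for illustration. */
static uint64_t wdog_len_field(uint64_t clk_rate, uint64_t timeout_ms)
{
	/* the watchdog counts down in 1024-cycle steps */
	uint64_t tout_wdog = (clk_rate * timeout_ms / 1000) >> 10;

	/* only the upper 16 bits of the 24-bit count can be set: round up */
	tout_wdog = (tout_wdog + 0xff) >> 8;

	/* clamp to the hardware maximum if the timeout overflows */
	return (tout_wdog >= 0x10000) ? 0xffff : tout_wdog;
}

int main(void)
{
	/* 100 MHz clock, 5 s timeout: 500000000 cycles -> 488281 steps,
	 * rounded up to a LEN field of 1908 (1908 << 8 = 488448 steps,
	 * slightly longer than requested) */
	printf("LEN = %llu\n",
	       (unsigned long long)wdog_len_field(100000000ULL, 5000ULL));
	return 0;
}

Rounding up errs on the side of a slightly longer timeout, which is the safe direction for a watchdog.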