diff options
author | yagi <yagi@ke66.alps.lineo.co.jp> | 2012-06-08 19:46:53 +0900 |
---|---|---|
committer | Justin Waters <justin.waters@timesys.com> | 2012-07-03 17:15:09 -0400 |
commit | ca1aae2ad0eac8089afebf56da2fdaebd8580213 (patch) | |
tree | e4d126af0204940239f23f63af7bbd6f94e3e533 | |
parent | abc9338fc082ace655c6d418adc5021a53900b84 (diff) |
add: FEC & eDMA
-rw-r--r-- | arch/arm/include/asm/mvf_edma.h | 188 | ||||
-rw-r--r-- | arch/arm/include/asm/mvf_edma_regs.h | 313 | ||||
-rw-r--r-- | arch/arm/include/asm/mvf_switch.h | 619 | ||||
-rw-r--r-- | arch/arm/mach-mvf/Makefile | 1 | ||||
-rw-r--r-- | arch/arm/mach-mvf/l2switch.c | 327 | ||||
-rw-r--r-- | arch/arm/mach-mvf/mm.c | 2 | ||||
-rw-r--r-- | arch/arm/mach-mvf/mvf_fec.c | 7 | ||||
-rw-r--r-- | drivers/dma/Kconfig | 7 | ||||
-rw-r--r-- | drivers/dma/Makefile | 1 | ||||
-rw-r--r-- | drivers/dma/mvf_edma.c | 555 | ||||
-rw-r--r-- | drivers/net/Kconfig | 7 | ||||
-rw-r--r-- | drivers/net/Makefile | 4 | ||||
-rwxr-xr-x | drivers/net/fec.c | 5 | ||||
-rw-r--r-- | drivers/net/fec.h | 4 | ||||
-rw-r--r-- | drivers/net/fec_1588.c | 4 | ||||
-rw-r--r-- | drivers/net/mvf_switch.c | 4535 | ||||
-rw-r--r-- | drivers/net/mvf_switch.h | 661 | ||||
-rw-r--r-- | drivers/net/phy/micrel.c | 22 | ||||
-rw-r--r-- | include/linux/fsl_devices.h | 16 | ||||
-rw-r--r-- | include/linux/micrel_phy.h | 1 |
20 files changed, 7270 insertions, 9 deletions
diff --git a/arch/arm/include/asm/mvf_edma.h b/arch/arm/include/asm/mvf_edma.h new file mode 100644 index 000000000000..4d8e2523753e --- /dev/null +++ b/arch/arm/include/asm/mvf_edma.h @@ -0,0 +1,188 @@ + /* + * mvf_edma.h - mvf eDMA driver header file. + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Add support for mvf platform (Lanttor.Guo@freescale.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _MCF_EDMA_H +#define _MCF_EDMA_H + +#include <linux/interrupt.h> +#include <asm/mvf_edma_regs.h> +#include <linux/scatterlist.h> + +#define MCF_EDMA_INT0_CHANNEL_BASE (8) +#define MCF_EDMA_INT0_CONTROLLER_BASE (64) +#define MCF_EDMA_INT0_BASE (MCF_EDMA_INT0_CHANNEL_BASE + MCF_EDMA_INT0_CONTROLLER_BASE) +#define MCF_EDMA_INT0_NUM (16) +#define MCF_EDMA_INT0_END (MCF_EDMA_INT0_NUM) + +#define MCF_EDMA_INT1_CHANNEL_BASE (8) +#define MCF_EDMA_INT1_CONTROLLER_BASE (128) +#define MCF_EDMA_INT1_BASE (MCF_EDMA_INT1_CHANNEL_BASE + MCF_EDMA_INT1_CONTROLLER_BASE) +#define MCF_EDMA_INT1_NUM (40) +#define MCF_EDMA_INT1_END (MCF_EDMA_INT0_END + MCF_EDMA_INT1_NUM) + +#define MCF_EDMA_INT2_CHANNEL_BASE (0) +#define MCF_EDMA_INT2_CONTROLLER_BASE (192) +#define MCF_EDMA_INT2_BASE (MCF_EDMA_INT2_CHANNEL_BASE + MCF_EDMA_INT2_CONTROLLER_BASE) +#define MCF_EDMA_INT2_NUM (8) +#define MCF_EDMA_INT2_END (MCF_EDMA_INT1_END + MCF_EDMA_INT2_NUM) + +#define MCF_EDMA_CHANNEL_ANY (0xFF) +#define MCF_EDMA_INT_ERR (16) /* edma error interrupt */ + +#define MCF_EDMA_TCD_PER_CHAN 256 +#define MVF_EACH_DMA_CHANNEL 32 + +/* Setup transfer control descriptor (TCD) + * channel - descriptor number + * source - source address + * dest - destination address + * attr - attributes + * soff - source offset + * nbytes - number of bytes to be 
transfered in minor loop + * slast - last source address adjustment + * citer - major loop count + * biter - begining minor loop count + * doff - destination offset + * dlast_sga - last destination address adjustment + * major_int - generate interrupt after each major loop + * disable_req - disable DMA request after major loop + */ +#if 0 +void mvf_edma_set_tcd_params(int channel, u32 source, u32 dest, + u32 attr, u32 soff, u32 nbytes, u32 slast, + u32 citer, u32 biter, u32 doff, u32 dlast_sga, + int major_int, int disable_req); +#endif + +/* Setup transfer control descriptor (TCD) and enable halfway irq + * channel - descriptor number + * source - source address + * dest - destination address + * attr - attributes + * soff - source offset + * nbytes - number of bytes to be transfered in minor loop + * slast - last source address adjustment + * biter - major loop count + * doff - destination offset + * dlast_sga - last destination address adjustment + * disable_req - disable DMA request after major loop + */ +void mvf_edma_set_tcd_params_halfirq(int channel, u32 source, u32 dest, + u32 attr, u32 soff, u32 nbytes, u32 slast, + u32 biter, u32 doff, u32 dlast_sga, + int disable_req); + +/* check if dma is done + * channel - descriptor number + * return 1 if done + */ +int mvf_edma_check_done(int channel); + +#if 0 + +/* Starts eDMA transfer on specified channel + * channel - eDMA TCD number + */ +static inline void +mvf_edma_start_transfer(int channel) +{ + MCF_EDMA_SERQ = channel; + MCF_EDMA_SSRT = channel; +} + +/* Restart eDMA transfer from halfirq + * channel - eDMA TCD number + */ +static inline void +mvf_edma_confirm_halfirq(int channel) +{ + /*MCF_EDMA_TCD_CSR(channel) = 7;*/ + MCF_EDMA_SSRT = channel; +} + +/* Starts eDMA transfer on specified channel based on peripheral request + * channel - eDMA TCD number + */ +static inline void mvf_edma_enable_transfer(int channel) +{ + MCF_EDMA_SERQ = channel; +} + + +/* Stops eDMA transfer + * channel - eDMA TCD number 
+ */ +static inline void +mvf_edma_stop_transfer(int channel) +{ + MCF_EDMA_CINT = channel; + MCF_EDMA_CERQ = channel; +} + +/* Confirm that interrupt has been handled + * channel - eDMA TCD number + */ +static inline void +mvf_edma_confirm_interrupt_handled(int channel) +{ + MCF_EDMA_CINT = channel; +} +#endif + + +/** + * mvf_edma_request_channel - Request an eDMA channel + * @channel: channel number. In case it is equal to EDMA_CHANNEL_ANY + * it will be allocated a first free eDMA channel. + * @handler: dma handler + * @error_handler: dma error handler + * @irq_level: irq level for the dma handler + * @arg: argument to pass back + * @lock: optional spinlock to hold over interrupt + * @device_id: device id + * + * Returns allocatedd channel number if success or + * a negative value if failure. + */ +int mvf_edma_request_channel(int channel, + irqreturn_t(*handler) (int, void *), + void (*error_handler) (int, void *), + u8 irq_level, + void *arg, + spinlock_t *lock, const char *device_id); + +/** + * Update the channel callback/arg + * @channel: channel number + * @handler: dma handler + * @error_handler: dma error handler + * @arg: argument to pass back + * + * Returns 0 if success or a negative value if failure + */ +int mvf_edma_set_callback(int channel, + irqreturn_t(*handler) (int, void *), + void (*error_handler) (int, void *), void *arg); + +/** + * Free the edma channel + * @channel: channel number + * @arg: argument created with + * + * Returns 0 if success or a negative value if failure + */ +int mvf_edma_free_channel(int channel, void *arg); + +void mvf_edma_dump_channel(int channel); + +#endif /* _MCF_EDMA_H */ diff --git a/arch/arm/include/asm/mvf_edma_regs.h b/arch/arm/include/asm/mvf_edma_regs.h new file mode 100644 index 000000000000..d5d2f253df07 --- /dev/null +++ b/arch/arm/include/asm/mvf_edma_regs.h @@ -0,0 +1,313 @@ +/* mvf_edma_regs.h + * + * Copyright (C) 2012 Freescale Semiconductor, Inc. All rights reserved. 
+ * Lanttor.Guo@freescale.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef _MVF_EDMA_REG_H__ +#define _MVF_EDMA_REG_H__ + +#define MVF_REG32(x) *((unsigned long *)x) +#define MVF_REG16(x) *((unsigned short *)x) +#define MVF_REG08(x) *((unsigned char *)x) + + +/* + * Enhanced DMA (EDMA) + */ + +/* Channels */ +#define MVF_EDMA_CHAN_DREQ0 0 /* External DMA request 0 */ +#define MVF_EDMA_CHAN_DREQ1 1 /* External DMA request 1 */ +#define MVF_EDMA_CHAN_UART0_RX 2 /* UART0 Receive */ +#define MVF_EDMA_CHAN_UART0_TX 3 /* UART0 Transmit */ +#define MVF_EDMA_CHAN_UART1_RX 4 /* UART1 Receive */ +#define MVF_EDMA_CHAN_UART1_TX 5 /* UART1 Transmit */ +#define MVF_EDMA_CHAN_UART2_RX 6 /* UART2 Receive */ +#define MVF_EDMA_CHAN_UART2_TX 7 /* UART2 Transmit */ +#define MVF_EDMA_CHAN_TIMER0 8 /* Timer 0 / SSI0 Rx */ +#define MVF_EDMA_CHAN_TIMER1 9 /* Timer 1 / SSI1 Rx */ +#define MVF_EDMA_CHAN_TIMER2 10 /* Timer 2 / SSI0 Tx */ +#define MVF_EDMA_CHAN_TIMER3 11 /* Timer 3 / SSI1 Tx */ +#define MVF_EDMA_CHAN_DSPI0_RX 12 /* DSPI0 Receive */ +#define MVF_EDMA_CHAN_DSPI0_TX 13 /* DSPI0 Transmit */ +#define MVF_EDMA_CHAN_DSPI1_RX 14 /* DSPI1 Receive */ +#define MVF_EDMA_CHAN_DSPI1_TX 15 /* DSPI1 Transmit */ +#define MVF_EDMA_CHAN_UART3_RX 16 /* UART3 Receive */ +#define MVF_EDMA_CHAN_UART3_TX 17 /* UART3 Transmit */ +#define MVF_EDMA_CHAN_UART4_RX 18 /* UART4 Receive */ +#define MVF_EDMA_CHAN_UART4_TX 19 /* UART4 Transmit */ +#define MVF_EDMA_CHAN_UART5_RX 20 /* UART5 Receive */ +#define MVF_EDMA_CHAN_UART5_TX 21 /* UART5 Transmit */ +#define MVF_EDMA_CHAN_UART6_RX 22 /* UART6 Receive */ +#define MVF_EDMA_CHAN_UART6_TX 23 /* UART6 Transmit */ +#define MVF_EDMA_CHAN_I2C0 24 /* I2C0 */ +#define MVF_EDMA_CHAN_I2C1 25 /* I2C1 */ +#define MVF_EDMA_CHAN_I2C2 26 /* 
I2C2 */ +#define MVF_EDMA_CHAN_I2C3 27 /* I2C3 */ +#define MVF_EDMA_CHAN_DSPI2_RX 28 /* DSPI2 Receive */ +#define MVF_EDMA_CHAN_DSPI2_TX 29 /* DSPI2 Transmit */ +#define MVF_EDMA_CHAN_N0 30 /* Available for software */ +#define MVF_EDMA_CHAN_N1 31 /* Available for software */ +#define MVF_EDMA_CHAN_UART7_RX 32 /* UART7 Receive */ +#define MVF_EDMA_CHAN_UART7_TX 33 /* UART7 Transmit */ +#define MVF_EDMA_CHAN_UART8_RX 34 /* UART8 Receive */ +#define MVF_EDMA_CHAN_UART8_TX 35 /* UART8 Transmit */ +#define MVF_EDMA_CHAN_UART9_RX 36 /* UART9 Receive */ +#define MVF_EDMA_CHAN_UART9_TX 37 /* UART9 Transmit */ +#define MVF_EDMA_CHAN_OW 38 /* 1-Wire */ +#define MVF_EDMA_CHAN_RESERVED 39 /* Reserved */ +#define MVF_EDMA_CHAN_I2C4 40 /* I2C4 */ +#define MVF_EDMA_CHAN_I2C5 41 /* I2C5 */ +#define MVF_EDMA_CHAN_N2 42 /* Available for software */ +#define MVF_EDMA_CHAN_N3 43 /* Available for software */ +#define MVF_EDMA_CHAN_DSPI3_RX 44 /* DSPI3 Receive */ +#define MVF_EDMA_CHAN_DSPI3_TX 45 /* DSPI3 Transmit */ +#define MVF_EDMA_CHAN_SSI0_RX0 48 /* SSI0 Receive 0 */ +#define MVF_EDMA_CHAN_SSI0_RX1 49 /* SSI0 Receive 1 */ +#define MVF_EDMA_CHAN_SSI0_TX0 50 /* SSI0 Transmit 0 */ +#define MVF_EDMA_CHAN_SSI0_TX1 51 /* SSI0 Transmit 1 */ +#define MVF_EDMA_CHAN_SSI1_RX0 52 /* SSI1 Receive 0 */ +#define MVF_EDMA_CHAN_SSI1_RX1 53 /* SSI1 Receive 1 */ +#define MVF_EDMA_CHAN_SSI1_TX0 54 /* SSI1 Transmit 0 */ +#define MVF_EDMA_CHAN_SSI1_TX1 55 /* SSI1 Transmit 1 */ +#define MVF_EDMA_CHAN_PWM_CAP 56 /* PWM Capture */ +#define MVF_EDMA_CHAN_PWM_VAL 57 /* PWM Value */ +#define MVF_EDMA_CHAN_RESERVED2 58 /* Reserved */ +#define MVF_EDMA_CHAN_ESDHC 59 /* eSDHC */ +#define MVF_EDMA_CHAN_ADC0 60 /* ADC 0 */ +#define MVF_EDMA_CHAN_ADC1 61 /* ADC 1 */ +#define MVF_EDMA_CHAN_DAC0 62 /* DAC 0 */ +#define MVF_EDMA_CHAN_DAC1 63 /* DAC 1 */ + +/* Register read/write macros */ +/* offset 0x0000_0000 - 0x0000_00ff main dma control area */ +#define MVF_EDMA_CR(base) MVF_REG32((long)(base) + 0x00000000) 
+#define MVF_EDMA_ES(base) MVF_REG32((long)(base) + 0x00000004) +//#define MVF_EDMA_ERQH(base) MVF_REG32((long)(base) + 0x00000008) +#define MVF_EDMA_ERQ(base) MVF_REG32((long)(base) + 0x0000000C) +//#define MVF_EDMA_EEIH(base) MVF_REG32((long)(base) + 0x00000010) +#define MVF_EDMA_EEI(base) MVF_REG32((long)(base) + 0x00000014) +#define MVF_EDMA_SERQ(base) MVF_REG08((long)(base) + 0x00000008) +#define MVF_EDMA_CERQ(base) MVF_REG08((long)(base) + 0x00000019) +#define MVF_EDMA_SEEI(base) MVF_REG08((long)(base) + 0x0000001A) +#define MVF_EDMA_CEEI(base) MVF_REG08((long)(base) + 0x0000001B) +#define MVF_EDMA_CINT(base) MVF_REG08((long)(base) + 0x0000001C) +#define MVF_EDMA_CERR(base) MVF_REG08((long)(base) + 0x0000001D) +#define MVF_EDMA_SSRT(base) MVF_REG08((long)(base) + 0x0000001E) +#define MVF_EDMA_CDNE(base) MVF_REG08((long)(base) + 0x0000001F) +//#define MVF_EDMA_INTH(base) MVF_REG32((long)(base) + 0x00000020) +#define MVF_EDMA_INT(base) MVF_REG32((long)(base) + 0x00000024) +//#define MVF_EDMA_ERRH(base) MVF_REG32((long)(base) + 0x00000028) +#define MVF_EDMA_ERR(base) MVF_REG32((long)(base) + 0x0000002C) +//#define MVF_EDMA_RSH(base) MVF_REG32((long)(base) + 0x00000030) +#define MVF_EDMA_RS(base) MVF_REG32((long)(base) + 0x00000034) + +/* Parameterized register read/write macros for multiple registers */ +/* offset 0x0000_0100 - 0x0000_011f dma channel priority area */ +#define MVF_EDMA_DCHPRI(base,x) MVF_REG08((long)(base) + 0x00000100 +((x)*0x001)) + + +/* offset 0x0000_1000 - 0x0000_13ff tcd area */ +#define MVF_EDMA_TCD_SADDR(base,x) MVF_REG32((long)(base) + 0x00001000 +((x)*0x020)) +#define MVF_EDMA_TCD_ATTR(base,x) MVF_REG16((long)(base) + 0x00001004 +((x)*0x020)) +#define MVF_EDMA_TCD_SOFF(base,x) MVF_REG16((long)(base) + 0x00001006 +((x)*0x020)) +#define MVF_EDMA_TCD_NBYTES(base,x) MVF_REG32((long)(base) + 0x00001008 +((x)*0x020)) +#define MVF_EDMA_TCD_SLAST(base,x) MVF_REG32((long)(base) + 0x0000100C +((x)*0x020)) +#define MVF_EDMA_TCD_DADDR(base,x) 
MVF_REG32((long)(base) + 0x00001010 +((x)*0x020)) +#define MVF_EDMA_TCD_CITER_ELINK(base,x) MVF_REG16((long)(base) + 0x00001014 +((x)*0x020)) +#define MVF_EDMA_TCD_CITER(base, x) MVF_REG16((long)(base) + 0x00001014 +((x)*0x020)) +#define MVF_EDMA_TCD_DOFF(base,x) MVF_REG16((long)(base) + 0x00001016 +((x)*0x020)) +#define MVF_EDMA_TCD_DLAST_SGA(base, x) MVF_REG32((long)(base) + 0x00001018 +((x)*0x020)) +#define MVF_EDMA_TCD_BITER_ELINK(base,x) MVF_REG16((long)(base) + 0x0000101C +((x)*0x020)) +#define MVF_EDMA_TCD_BITER(base, x) MVF_REG16((long)(base) + 0x0000101C +((x)*0x020)) +#define MVF_EDMA_TCD_CSR(base,x) MVF_REG16((long)(base) + 0x0000101e +((x)*0x020)) + +/* Bit definitions and macros for CR */ +#define MVF_EDMA_CR_EDBG (0x00000002) +#define MVF_EDMA_CR_ERCA (0x00000004) +#define MVF_EDMA_CR_ERGA (0x00000008) +#define MVF_EDMA_CR_HOE (0x00000010) +#define MVF_EDMA_CR_HALT (0x00000020) +#define MVF_EDMA_CR_CLM (0x00000040) +#define MVF_EDMA_CR_EMLM (0x00000080) +#define MVF_EDMA_CR_GRP0PRI(x) (((x)&0x03)<<8) +#define MVF_EDMA_CR_GRP1PRI(x) (((x)&0x03)<<10) +#define MVF_EDMA_CR_GRP2PRI(x) (((x)&0x03)<<12) +#define MVF_EDMA_CR_GRP3PRI(x) (((x)&0x03)<<14) +#define MVF_EDMA_CR_ECX (0x00010000) +#define MVF_EDMA_CR_CX (0x00020000) + +/* Bit definitions and macros for ES */ +#define MVF_EDMA_ES_DBE (0x00000001) +#define MVF_EDMA_ES_SBE (0x00000002) +#define MVF_EDMA_ES_SGE (0x00000004) +#define MVF_EDMA_ES_NCE (0x00000008) +#define MVF_EDMA_ES_DOE (0x00000010) +#define MVF_EDMA_ES_DAE (0x00000020) +#define MVF_EDMA_ES_SOE (0x00000040) +#define MVF_EDMA_ES_SAE (0x00000080) +#define MVF_EDMA_ES_ERRCHN(x) (((x)&0x0000003F)<<8) +#define MVF_EDMA_ES_CPE (0x00004000) +#define MVF_EDMA_ES_GPE (0x00008000) +#define MVF_EDMA_ES_ECX (0x00010000) +#define MVF_EDMA_ES_VLD (0x80000000) + +/* Bit definitions and macros for ERQ: 0~63 bits */ +#define MVF_EDMA_ERQ_ERQH(x) (0x01<<x) /*32~63*/ +#define MVF_EDMA_ERQ_ERQL(x) (0x01<<x) /*0~31*/ + +/* Bit definitions and macros for EEI: 
0~63 bits */ +#define MVF_EDMA_EEI_EEIH(x) (0x01<<x) /*32~63*/ +#define MVF_EDMA_EEI_EEIL(x) (0x01<<x) /*0~31*/ + +/* Bit definitions and macros for SERQ */ +#define MVF_EDMA_SERQ_SERQ(x) (((x)&0x3F)) +#define MVF_EDMA_SERQ_SAER (0x40) +#define MVF_EDMA_SERQ_NOP (0x80) + +/* Bit definitions and macros for CERQ */ +#define MVF_EDMA_CERQ_CERQ(x) (((x)&0x3F)) +#define MVF_EDMA_CERQ_CAER (0x40) +#define MVF_EDMA_CERQ_NOP (0x80) + +/* Bit definitions and macros for SEEI */ +#define MVF_EDMA_SEEI_SEEI(x) (((x)&0x3F)) +#define MVF_EDMA_SEEI_SAEE (0x40) +#define MVF_EDMA_SEEI_NOP (0x80) + +/* Bit definitions and macros for CEEI */ +#define MVF_EDMA_CEEI_CEEI(x) (((x)&0x3F)) +#define MVF_EDMA_CEEI_CAEE (0x40) +#define MVF_EDMA_CEEI_NOP (0x80) + +/* Bit definitions and macros for CINT */ +#define MVF_EDMA_CINT_CINT(x) (((x)&0x3F)) +#define MVF_EDMA_CINT_CAIR (0x40) +#define MVF_EDMA_CINT_NOP (0x80) + +/* Bit definitions and macros for CERR */ +#define MVF_EDMA_CERR_CERR(x) (((x)&0x3F)) +#define MVF_EDMA_CERR_CAER (0x40) +#define MVF_EDMA_CERR_NOP (0x80) + +/* Bit definitions and macros for SSRT */ +#define MVF_EDMA_SSRT_SSRT(x) (((x)&0x3F)) +#define MVF_EDMA_SSRT_SAST (0x40) +#define MVF_EDMA_SSRT_NOP (0x80) + +/* Bit definitions and macros for CDNE */ +#define MVF_EDMA_CDNE_CDNE(x) (((x)&0x3F)) +#define MVF_EDMA_CDNE_CADN (0x40) +#define MVF_EDMA_CDNE_NOP (0x80) + +/* Bit definitions and macros for INTR: 0~63 bits */ +#define MVF_EDMA_INTR_INTH(x) (0x01<<x) /*32~63*/ +#define MVF_EDMA_INTR_INTL(x) (0x01<<x) /*0~31*/ + +/* Bit definitions and macros for ERR: 0~63 bits */ +#define MVF_EDMA_ERR_ERRH(x) (0x01<<x) /*32~63*/ +#define MVF_EDMA_ERR_ERRL(x) (0x01<<x) /*0~31*/ + +/* Bit defineitions and macros for HRSH/HRSL */ +#define MVF_EDMA_HRS_HRSH(x) (0x01<<x) /*32~63*/ +#define MVF_EDMA_HRS_HRSL(x) (0x01<<x) /*0~31*/ + +/* Bit definitions and macros for DCHPRI group */ +#define MVF_EDMA_DCHPRI_CHPRI(x) (((x)&0x0F)) +#define MVF_EDMA_DCHPRI_GRPPRI(x) (((x)&0x03) << 4) +#define 
MVF_EDMA_DCHPRI_DPA (0x40) +#define MVF_EDMA_DCHPRI_ECP (0x80) + +/* Bit definitions and macros for TCD_SADDR group */ +#define MVF_EDMA_TCD_SADDR_SADDR(x) (x) + +/* Bit definitions and macros for TCD_ATTR group */ +#define MVF_EDMA_TCD_ATTR_DSIZE(x) (((x)&0x0007)) +#define MVF_EDMA_TCD_ATTR_DMOD(x) (((x)&0x001F)<<3) +#define MVF_EDMA_TCD_ATTR_SSIZE(x) (((x)&0x0007)<<8) +#define MVF_EDMA_TCD_ATTR_SMOD(x) (((x)&0x001F)<<11) +#define MVF_EDMA_TCD_ATTR_SSIZE_8BIT (0x0000) +#define MVF_EDMA_TCD_ATTR_SSIZE_16BIT (0x0100) +#define MVF_EDMA_TCD_ATTR_SSIZE_32BIT (0x0200) +#define MVF_EDMA_TCD_ATTR_SSIZE_16BYTE (0x0400) +#define MVF_EDMA_TCD_ATTR_DSIZE_8BIT (0x0000) +#define MVF_EDMA_TCD_ATTR_DSIZE_16BIT (0x0001) +#define MVF_EDMA_TCD_ATTR_DSIZE_32BIT (0x0002) +#define MVF_EDMA_TCD_ATTR_DSIZE_16BYTE (0x0004) + +/* Bit definitions and macros for TCD_SOFF group */ +#define MVF_EDMA_TCD_SOFF_SOFF(x) (x) + +/* Bit definitions and macros for TCD_NBYTES group */ +#define MVF_EDMA_TCD_NBYTES_NBYTES(x) (x) +#define MVF_EDMA_TCD_NBYTES_SMLOE (0x80000000) +#define MVF_EDMA_TCD_NBYTES_DMLOE (0x40000000) +#define MVF_EDMA_TCD_NBYTES_MLOFF(x) (((x)&0xFFFFF)<<20) +#define MVF_EDMA_TCD_NBYTES_9BITS ((x)&0x1FF) + +/* Bit definitions and macros for TCD_SLAST group */ +#define MVF_EDMA_TCD_SLAST_SLAST(x) (x) + +/* Bit definitions and macros for TCD_DADDR group */ +#define MVF_EDMA_TCD_DADDR_DADDR(x) (x) + +/* Bit definitions and macros for TCD_CITER_ELINK group */ +#define MVF_EDMA_TCD_CITER_ELINK_CITER(x) (((x)&0x01FF)) +#define MVF_EDMA_TCD_CITER_ELINK_LINKCH(x) (((x)&0x003F)<<9) +#define MVF_EDMA_TCD_CITER_ELINK_E_LINK (0x8000) + +/* Bit definitions and macros for TCD_CITER group */ +#define MVF_EDMA_TCD_CITER_CITER(x) (((x)&0x7FFF)) +#define MVF_EDMA_TCD_CITER_E_LINK (0x8000) + +/* Bit definitions and macros for TCD_DOFF group */ +#define MVF_EDMA_TCD_DOFF_DOFF(x) (x) + +/* Bit definitions and macros for TCD_DLAST_SGA group */ +#define MVF_EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x) + +/* Bit 
definitions and macros for TCD_BITER_ELINK group */ +#define MVF_EDMA_TCD_BITER_ELINK_BITER(x) (((x)&0x01FF)) +#define MVF_EDMA_TCD_BITER_ELINK_LINKCH(x) (((x)&0x003F)<<9) +#define MVF_EDMA_TCD_BITER_ELINK_E_LINK (0x8000) + +/* Bit definitions and macros for TCD_BITER group */ +#define MVF_EDMA_TCD_BITER_BITER(x) (((x)&0x7FFF)) +#define MVF_EDMA_TCD_BITER_E_LINK (0x8000) + +/* Bit definitions and macros for TCD_CSR group */ +#define MVF_EDMA_TCD_CSR_START (0x0001) +#define MVF_EDMA_TCD_CSR_INT_MAJOR (0x0002) +#define MVF_EDMA_TCD_CSR_INT_HALF (0x0004) +#define MVF_EDMA_TCD_CSR_D_REQ (0x0008) +#define MVF_EDMA_TCD_CSR_E_SG (0x0010) +#define MVF_EDMA_TCD_CSR_E_LINK (0x0020) +#define MVF_EDMA_TCD_CSR_ACTIVE (0x0040) +#define MVF_EDMA_TCD_CSR_DONE (0x0080) +#define MVF_EDMA_TCD_CSR_LINKCH(x) (((x)&0x003F)<<8) +#define MVF_EDMA_TCD_CSR_BWC(x) (((x)&0x0003)<<14) +#define MVF_EDMA_TCD_CSR_BWC_NO_STALL (0x0000) +#define MVF_EDMA_TCD_CSR_BWC_4CYC_STALL (0x8000) +#define MVF_EDMA_TCD_CSR_BWC_8CYC_STALL (0xC000) + +/* Bit definitions and macros for TCD0_CSR */ +#define MVF_EDMA_TCD0_CSR_START (0x0001) +#define MVF_EDMA_TCD0_CSR_INT_MAJOR (0x0002) +#define MVF_EDMA_TCD0_CSR_INT_HALF (0x0004) +#define MVF_EDMA_TCD0_CSR_D_REQ (0x0008) +#define MVF_EDMA_TCD0_CSR_E_SG (0x0010) +#define MVF_EDMA_TCD0_CSR_E_LINK (0x0020) +#define MVF_EDMA_TCD0_CSR_ACTIVE (0x0040) +#define MVF_EDMA_TCD0_CSR_DONE (0x0080) +#define MVF_EDMA_TCD0_CSR_LINKCH(x) (((x)&0x003F)<<8) +#define MVF_EDMA_TCD0_CSR_BWC(x) (((x)&0x0003)<<14) +#define MVF_EDMA_TCD0_CSR_BWC_NO_STALL (0x0000) +#define MVF_EDMA_TCD0_CSR_BWC_4CYC_STALL (0x8000) +#define MVF_EDMA_TCD0_CSR_BWC_8CYC_STALL (0xC000) + +#endif diff --git a/arch/arm/include/asm/mvf_switch.h b/arch/arm/include/asm/mvf_switch.h new file mode 100644 index 000000000000..a9ddbfcd1635 --- /dev/null +++ b/arch/arm/include/asm/mvf_switch.h @@ -0,0 +1,619 @@ +/****************************************************************************/ +/* + * L2 switch Controller 
(Etheren switch) driver for VF600. + * based on L2 switch Controller for MCF5441x. + * processors. + * + * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/****************************************************************************/ +#ifndef MVF_SWITCH_H +#define MVF_SWITCH_H +/****************************************************************************/ +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/platform_device.h> +#include <asm/pgtable.h> + +/* + * Some hardware gets it MAC address out of local flash memory. + * if this is non-zero then assume it is the address to get MAC from. 
+ */ +#define FEC_FLASHMAC 0 + +#ifdef CONFIG_SWITCH_DMA_USE_SRAM +#define TX_RING_SIZE 8 /* Must be power of two */ +#define TX_RING_MOD_MASK 7 /* for this to work */ +#else +#define TX_RING_SIZE 16 /* Must be power of two */ +#define TX_RING_MOD_MASK 15 /* for this to work */ +#endif + +#define SWITCH_EPORT_NUMBER 2 + +#define MVF_MII_SWITCH_SPEED 0x09 + + +// register offset for fec +#define FEC_R_CNTRL 0x084 /* Receive control reg */ +#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */ +#define FEC_IEVENT 0x004 /* Interrupt event reg */ +#define FEC_IMASK 0x008 +#define FEC_MII_DATA 0x040 /* MII manage frame reg */ +#define FEC_MII_SPEED 0x044 /* MII speed control reg */ +#define FEC_ECNTRL 0x024 /* Ethernet control reg */ + +/*-----------------------------------------------------------------------*/ +typedef struct l2switch_output_queue_status { + unsigned long ESW_MMSR; + unsigned long ESW_LMT; + unsigned long ESW_LFC; + unsigned long ESW_PCSR; + unsigned long ESW_IOSR; + unsigned long ESW_QWT; + unsigned long esw_reserved; + unsigned long ESW_P0BCT; +} esw_output_queue_status; + +typedef struct l2switch_statistics_status { + /* + * Total number of incoming frames processed + * but discarded in switch + */ + unsigned long ESW_DISCN; + /*Sum of bytes of frames counted in ESW_DISCN*/ + unsigned long ESW_DISCB; + /* + * Total number of incoming frames processed + * but not discarded in switch + */ + unsigned long ESW_NDISCN; + /*Sum of bytes of frames counted in ESW_NDISCN*/ + unsigned long ESW_NDISCB; +} esw_statistics_status; + +typedef struct l2switch_port_statistics_status { + /*outgoing frames discarded due to transmit queue congestion*/ + unsigned long MCF_ESW_POQC; + /*incoming frames discarded due to VLAN domain mismatch*/ + unsigned long MCF_ESW_PMVID; + /*incoming frames discarded due to untagged discard*/ + unsigned long MCF_ESW_PMVTAG; + /*incoming frames discarded due port is in blocking state*/ + unsigned long MCF_ESW_PBL; +} 
esw_port_statistics_status; + +typedef struct l2switch { + // 0x00-0x34 + unsigned long ESW_REVISION; + unsigned long ESW_SCRATCH; + unsigned long ESW_PER; + unsigned long reserved0[1]; + unsigned long ESW_VLANV; + unsigned long ESW_DBCR; + unsigned long ESW_DMCR; + unsigned long ESW_BKLR; + unsigned long ESW_BMPC; + unsigned long ESW_MODE; + unsigned long ESW_VIMSEL; + unsigned long ESW_VOMSEL; + unsigned long ESW_VIMEN; + unsigned long ESW_VID; + /*from 0x38 0x3C*/ + unsigned long esw_reserved0[2]; + unsigned long ESW_MCR;/*0x40*/ + unsigned long ESW_EGMAP; + unsigned long ESW_INGMAP; + unsigned long ESW_INGSAL; + unsigned long ESW_INGSAH; + unsigned long ESW_INGDAL; + unsigned long ESW_INGDAH; + unsigned long ESW_ENGSAL; + unsigned long ESW_ENGSAH; + unsigned long ESW_ENGDAL; + unsigned long ESW_ENGDAH; + unsigned long ESW_MCVAL;/*0x6C*/ + /*from 0x70--0x7C*/ + unsigned long esw_reserved1[4]; + unsigned long ESW_MMSR;/*0x80*/ + unsigned long ESW_LMT; + unsigned long ESW_LFC; + unsigned long ESW_PCSR; + unsigned long ESW_IOSR; + unsigned long ESW_QWT;/*0x94*/ + unsigned long esw_reserved2[1];/*0x98*/ + unsigned long ESW_P0BCT;/*0x9C*/ + /*from 0xA0-0xB8*/ + unsigned long esw_reserved3[7]; + unsigned long ESW_P0FFEN;/*0xBC*/ + /*MCF_ESW_PSNP(x) 0xFC0DC0C0+((x-1)*0x004))) 0xC0-0xDC*/ + /*#define MCF_ESW_PSNP(x) \ + (*(volatile unsigned long*)(0xFC0DC0C0+((x-1)*0x004)))*/ + unsigned long ESW_PSNP[8]; + /*MCF_ESW_IPSNP(x) 0xFC0DC0E0+((x-1)*0x004) 0xE0-0xFC*/ + /*#define MCF_ESW_IPSNP(x) \ + (*(volatile unsigned long*)(0xFC0DC0E0+((x-1)*0x004)))*/ + unsigned long ESW_IPSNP[8]; + /*port0-port2 VLAN Priority resolution map 0xFC0D_C100-C108*/ + /*#define MCF_ESW_PVRES(x) \ + (*(volatile unsigned long*)(0xFC0DC100+((x)*0x004)))*/ + unsigned long ESW_PVRES[3]; + /*from 0x10C-0x13C*/ + unsigned long esw_reserved4[13]; + unsigned long ESW_IPRES;/*0x140*/ + /*from 0x144-0x17C*/ + unsigned long esw_reserved5[15]; + + /*port0-port2 Priority Configuration 0xFC0D_C180-C188*/ + 
/*#define MCF_ESW_PRES(x) \ + (*(volatile unsigned long*)(0xFC0DC180+((x)*0x004)))*/ + unsigned long ESW_PRES[3]; + /*from 0x18C-0x1FC*/ + unsigned long esw_reserved6[29]; + + /*port0-port2 VLAN ID 0xFC0D_C200-C208*/ + /*#define MCF_ESW_PID(x) \ + (*(volatile unsigned long*)(0xFC0DC200+((x)*0x004)))*/ + unsigned long ESW_PID[3]; + /*from 0x20C-0x27C*/ + unsigned long esw_reserved7[29]; + + /*port0-port2 VLAN domain resolution entry 0xFC0D_C280-C2FC*/ + /*#define MCF_ESW_VRES(x) \ + (*(volatile unsigned long*)(0xFC0DC280+((x)*0x004)))*/ + unsigned long ESW_VRES[32]; + + unsigned long ESW_DISCN;/*0x300*/ + unsigned long ESW_DISCB; + unsigned long ESW_NDISCN; + unsigned long ESW_NDISCB;/*0xFC0DC30C*/ + /*per port statistics 0xFC0DC310_C33C*/ + /*#define MCF_ESW_POQC(x) \ + (*(volatile unsigned long*)(0xFC0DC310+((x)*0x010))) + #define MCF_ESW_PMVID(x) \ + (*(volatile unsigned long*)(0xFC0DC314+((x)*0x010))) + #define MCF_ESW_PMVTAG(x) \ + (*(volatile unsigned long*)(0xFC0DC318+((x)*0x010))) + #define MCF_ESW_PBL(x) \ + (*(volatile unsigned long*)(0xFC0DC31C+((x)*0x010))) + */ + esw_port_statistics_status port_statistics_status[3]; + /*from 0x340-0x400*/ + unsigned long esw_reserved8[48]; + + /*0xFC0DC400---0xFC0DC418*/ + /*unsigned long MCF_ESW_ISR;*/ + unsigned long switch_ievent; /* Interrupt event reg */ + /*unsigned long MCF_ESW_IMR;*/ + unsigned long switch_imask; /* Interrupt mask reg */ + /*unsigned long MCF_ESW_RDSR;*/ + unsigned long fec_r_des_start; /* Receive descriptor ring */ + /*unsigned long MCF_ESW_TDSR;*/ + unsigned long fec_x_des_start; /* Transmit descriptor ring */ + /*unsigned long MCF_ESW_MRBR;*/ + unsigned long fec_r_buff_size; /* Maximum receive buff size */ + /*unsigned long MCF_ESW_RDAR;*/ + unsigned long fec_r_des_active; /* Receive descriptor reg */ + /*unsigned long MCF_ESW_TDAR;*/ + unsigned long fec_x_des_active; /* Transmit descriptor reg */ + /*from 0x420-0x4FC*/ + unsigned long esw_reserved9[57]; + + /*0xFC0DC500---0xFC0DC508*/ + 
unsigned long ESW_LREC0; + unsigned long ESW_LREC1; + unsigned long ESW_LSR; +} switch_t; + +typedef struct _64bTableEntry { + unsigned int lo; /* lower 32 bits */ + unsigned int hi; /* upper 32 bits */ +} AddrTable64bEntry; + +typedef struct l2switchaddrtable { + AddrTable64bEntry eswTable64bEntry[2048]; +} eswAddrTable_t; + + +#define MCF_FEC_RCR_PROM (0x00000008) +#define MCF_FEC_RCR_RMII_MODE (0x00000100) +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16) +#define MCF_FEC_RCR_CRC_FWD (0x00004000) +#define MCF_FEC_TCR_FDEN (0x00000004) +#define MCF_FEC_ECR_ETHER_EN (0x00000002) +#define MCF_FEC_ECR_ENA_1588 (0x00000010) + +typedef struct _eswIOCTL_PORT_CONF { + int port; + int enable; +} eswIoctlPortConfig; + +typedef struct _eswIOCTL_PORT_EN_CONF { + int port; + int tx_enable; + int rx_enable; +} eswIoctlPortEnableConfig; + +typedef struct _eswIOCTL_IP_SNOOP_CONF { + int mode; + unsigned long ip_header_protocol; +} eswIoctlIpsnoopConfig; + +typedef struct _eswIOCTL_P0_FORCED_FORWARD_CONF { + int port1; + int port2; + int enable; +} eswIoctlP0ForcedForwardConfig; + +typedef struct _eswIOCTL_PORT_SNOOP_CONF { + int mode; + unsigned short compare_port; + int compare_num; +} eswIoctlPortsnoopConfig; + +typedef struct _eswIOCTL_PORT_Mirror_CONF { + int mirror_port; + int port; + int egress_en; + int ingress_en; + int egress_mac_src_en; + int egress_mac_des_en; + int ingress_mac_src_en; + int ingress_mac_des_en; + unsigned char *src_mac; + unsigned char *des_mac; + int mirror_enable; +} eswIoctlPortMirrorConfig; + +struct eswIoctlMirrorCfgPortMatch { + int mirror_port; + int port_match_en; + int port; +}; + +struct eswIoctlMirrorCfgAddrMatch { + int mirror_port; + int addr_match_en; + unsigned char *mac_addr; +}; + +typedef struct _eswIOCTL_PRIORITY_VLAN_CONF { + int port; + int func_enable; + int vlan_pri_table_num; + int vlan_pri_table_value; +} eswIoctlPriorityVlanConfig; + +typedef struct _eswIOCTL_PRIORITY_IP_CONF { + int port; + int func_enable; + int 
ipv4_en; + int ip_priority_num; + int ip_priority_value; +} eswIoctlPriorityIPConfig; + +typedef struct _eswIOCTL_PRIORITY_MAC_CONF { + int port; +} eswIoctlPriorityMacConfig; + +typedef struct _eswIOCTL_PRIORITY_DEFAULT_CONF { + int port; + unsigned char priority_value; +} eswIoctlPriorityDefaultConfig; + +typedef struct _eswIOCTL_IRQ_STATUS { + unsigned long isr; + unsigned long imr; + unsigned long rx_buf_pointer; + unsigned long tx_buf_pointer; + unsigned long rx_max_size; + unsigned long rx_buf_active; + unsigned long tx_buf_active; +} eswIoctlIrqStatus; + +typedef struct _eswIOCTL_PORT_Mirror_STATUS { + unsigned long ESW_MCR; + unsigned long ESW_EGMAP; + unsigned long ESW_INGMAP; + unsigned long ESW_INGSAL; + unsigned long ESW_INGSAH; + unsigned long ESW_INGDAL; + unsigned long ESW_INGDAH; + unsigned long ESW_ENGSAL; + unsigned long ESW_ENGSAH; + unsigned long ESW_ENGDAL; + unsigned long ESW_ENGDAH; + unsigned long ESW_MCVAL; +} eswIoctlPortMirrorStatus; + +typedef struct _eswIOCTL_VLAN_OUTPUT_CONF { + int port; + int mode; +} eswIoctlVlanOutputConfig; + +typedef struct _eswIOCTL_VLAN_INPUT_CONF { + int port; + int mode; + unsigned short port_vlanid; +} eswIoctlVlanInputConfig; + +typedef struct _eswIOCTL_VLAN_DOMAIN_VERIFY_CONF { + int port; + int vlan_domain_verify_en; + int vlan_discard_unknown_en; +} eswIoctlVlanVerificationConfig; + +typedef struct _eswIOCTL_VLAN_RESOULATION_TABLE { + unsigned short port_vlanid; + unsigned char vlan_domain_port; + unsigned char vlan_domain_num; +} eswIoctlVlanResoultionTable; + +struct eswVlanTableItem { + eswIoctlVlanResoultionTable table[32]; + unsigned char valid_num; +}; + +typedef struct _eswIOCTL_VLAN_INPUT_STATUS { + unsigned long ESW_VLANV; + unsigned long ESW_PID[3]; + unsigned long ESW_VIMSEL; + unsigned long ESW_VIMEN; + unsigned long ESW_VRES[32]; +} eswIoctlVlanInputStatus; + +typedef struct _eswIOCTL_Static_MACTable { + unsigned char *mac_addr; + int port; + int priority; +} eswIoctlUpdateStaticMACtable; + 
+typedef struct _eswIOCTL_OUTPUT_QUEUE{ + int fun_num; + esw_output_queue_status sOutputQueue; +} eswIoctlOutputQueue; +/* + * Info received from Hardware Learning FIFO, + * holding MAC address and corresponding Hash Value and + * port number where the frame was received (disassembled). + */ +typedef struct _eswPortInfo { + /* MAC lower 32 bits (first byte is 7:0). */ + unsigned int maclo; + /* MAC upper 16 bits (47:32). */ + unsigned int machi; + /* the hash value for this MAC address. */ + unsigned int hash; + /* the port number this MAC address is associated with. */ + unsigned int port; +} eswPortInfo; + +/* + * Hardware Look up Address Table 64-bit element. + */ +typedef volatile struct _64bitTableEntry { + unsigned int lo; /* lower 32 bits */ + unsigned int hi; /* upper 32 bits */ +} eswTable64bitEntry; + +struct eswAddrTableEntryExample { + /* the entry number */ + unsigned short entrynum; + /* mac address array */ + unsigned char mac_addr[6]; + unsigned char item1; + unsigned short item2; +}; + +/* + * Define the buffer descriptor structure. 
+ */ +#if defined(CONFIG_ARCH_MXC) +typedef struct bufdesc { + unsigned short cbd_datlen; /* Data length */ + unsigned short cbd_sc; /* Control and status info */ + unsigned long cbd_bufaddr; /* Buffer address */ +#if defined(CONFIG_FEC_1588) + unsigned long ebd_status; + unsigned long cbd_prot; + unsigned long bdu; + unsigned long timestamp; + unsigned long reserverd_word1; + unsigned long reserverd_word2; +#endif +} cbd_t; +#else +typedef struct bufdesc { + unsigned short cbd_sc; /* Control and status info */ + unsigned short cbd_datlen; /* Data length */ + unsigned long cbd_bufaddr; /* Buffer address */ +#if defined(CONFIG_FEC_1588) + unsigned long ebd_status; + unsigned short length_proto_type; + unsigned short payload_checksum; + unsigned long bdu; + unsigned long timestamp; + unsigned long reserverd_word1; + unsigned long reserverd_word2; +#endif +} cbd_t; +#endif + +/* Forward declarations of some structures to support different PHYs + */ +typedef struct { + uint mii_data; + void (*funct)(uint mii_reg, struct net_device *dev); +} phy_cmd_t; + +typedef struct { + uint id; + char *name; + + const phy_cmd_t *config; + const phy_cmd_t *startup; + const phy_cmd_t *ack_int; + const phy_cmd_t *shutdown; +} phy_info_t; + +struct port_status { + /* 1: link is up, 0: link is down */ + int port1_link_status; + int port2_link_status; + /* 1: blocking, 0: unblocking */ + int port0_block_status; + int port1_block_status; + int port2_block_status; +}; + +struct port_all_status { + /* 1: link is up, 0: link is down */ + int link_status; + /* 1: blocking, 0: unblocking */ + int block_status; + /* 1: unlearning, 0: learning */ + int learn_status; + /* vlan domain verify 1: enable 0: disable */ + int vlan_verify; + /* discard unknow 1: enable 0: disable */ + int discard_unknown; + /* multicast resolution 1: enable 0: disable */ + int multi_reso; + /* broadcast resolution 1: enable 0: disalbe */ + int broad_reso; + /* transmit 1: enable 0: disable */ + int ftransmit; + /* 
receive 1: enable 0: disable */ + int freceive; +}; + +/* The switch buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct switch_enet_private { + /* Hardware registers of the switch device */ + volatile switch_t *hwp; + volatile eswAddrTable_t *hwentry; + + void __iomem *fec[2]; + + struct net_device *netdev; + struct platform_device *pdev; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + ushort skb_cur; + ushort skb_dirty; + + /* CPM dual port RAM relative addresses. + */ + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ + cbd_t *dirty_tx; /* The ring entries to be free()ed. 
*/ + uint tx_full; + /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ + spinlock_t hw_lock; + + /* hold while accessing the mii_list_t() elements */ + spinlock_t mii_lock; + struct mii_bus *mdio_bus; + struct phy_device *phydev[SWITCH_EPORT_NUMBER]; + + uint phy_id; + uint phy_id_done; + uint phy_status; + uint phy_speed; + phy_info_t const *phy; + struct work_struct phy_task; + volatile switch_t *phy_hwp; + + uint sequence_done; + uint mii_phy_task_queued; + + uint phy_addr; + + int index; + int opened; + int full_duplex; + int msg_enable; + int phy1_link; + int phy1_old_link; + int phy1_duplex; + int phy1_speed; + + int phy2_link; + int phy2_old_link; + int phy2_duplex; + int phy2_speed; + /* --------------Statistics--------------------------- */ + /* when a new element deleted a element with in + * a block due to lack of space */ + int atBlockOverflows; + /* Peak number of valid entries in the address table */ + int atMaxEntries; + /* current number of valid entries in the address table */ + int atCurrEntries; + /* maximum entries within a block found + * (updated within ageing)*/ + int atMaxEntriesPerBlock; + + /* -------------------ageing function------------------ */ + /* maximum age allowed for an entry */ + int ageMax; + /* last LUT entry to block that was + * inspected by the Ageing task*/ + int ageLutIdx; + /* last element within block inspected by the Ageing task */ + int ageBlockElemIdx; + /* complete table has been processed by ageing process */ + int ageCompleted; + /* delay setting */ + int ageDelay; + /* current delay Counter */ + int ageDelayCnt; + + /* ----------------timer related---------------------------- */ + /* current time (for timestamping) */ + int currTime; + /* flag set by timer when currTime changed + * and cleared by serving function*/ + int timeChanged; + + /**/ + /* Timer for Aging */ + struct timer_list timer_aging; + int learning_irqhandle_enable; +}; + +struct switch_platform_private { + struct 
platform_device *pdev; + + unsigned long quirks; + int num_slots; /* Slots on controller */ + struct switch_enet_private *fep_host[0]; /* Pointers to hosts */ +}; +#endif diff --git a/arch/arm/mach-mvf/Makefile b/arch/arm/mach-mvf/Makefile index a96c70307259..1362c8551aea 100644 --- a/arch/arm/mach-mvf/Makefile +++ b/arch/arm/mach-mvf/Makefile @@ -6,3 +6,4 @@ obj-y := cpu.o mm.o devices.o irq.o clock.o bus_freq.o system.o mvf_fec.o obj-$(CONFIG_MACH_TWR_VF600) += board-twr_vf600.o +obj-$(CONFIG_MVF_ETHER_SWITCH) += l2switch.o diff --git a/arch/arm/mach-mvf/l2switch.c b/arch/arm/mach-mvf/l2switch.c new file mode 100644 index 000000000000..06599fb8cd04 --- /dev/null +++ b/arch/arm/mach-mvf/l2switch.c @@ -0,0 +1,327 @@ +/* + * l2switch.c + * + * Sub-architcture dependant initialization code for the Freescale + * MVF family L2 Switch module. + * + * based on 5441X. + * + * Copyright (C) 2010-2012 Freescale Semiconductor, Inc. All Rights Reserved. + * ShrekWu B16972@freescale.com + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/param.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> + +#include <asm/traps.h> +#include <asm/mvf_switch.h> + +#if (defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6)) || defined(CONFIG_ARCH_MVF) \ + && defined(CONFIG_FEC_1588) +#define CONFIG_ENHANCED_BD +#endif + + +// base address +#define FEC_ETH0 0x400D0000 +#define FEC_ETH1 0x400D1000 +#define L2SWITCH_1 0x400E8000 + +#pragma message "need fix!!!!! L2SWITCH_ATBL" +#define L2SWITCH_ATBL 0x400F0000 + +static unsigned char switch_mac_default[] = { + 0x00, 0x04, 0x9F, 0x00, 0xB3, 0x49, +}; + +static unsigned char switch_mac_addr[6]; + +static void switch_request_intrs(struct net_device *dev, + irqreturn_t switch_net_irq_handler(int irq, void *private), + void *irq_privatedata) +{ + struct switch_enet_private *fep; + int b; + static const struct idesc { + char *name; + unsigned short irq; + } *idp, id[] = { + /*{ "esw_isr(EBERR)", 38 },*/ + { "esw_isr(RxBuffer)", 39 }, + { "esw_isr(RxFrame)", 40 }, + { "esw_isr(TxBuffer)", 41 }, + { "esw_isr(TxFrame)", 42 }, + { "esw_isr(QM)", 43 }, + { "esw_isr(P0OutputDiscard)", 44 }, + { "esw_isr(P1OutputDiscard)", 45 }, + { "esw_isr(P2OutputDiscard)", 46 }, + { "esw_isr(LearningRecord)", 47 }, + { NULL }, + }; + + fep = netdev_priv(dev); + /*intrruption L2 ethernet SWITCH */ + b = 64 + 64 + 64; + + /* Setup interrupt handlers. 
*/ + for (idp = id; idp->name; idp++) { + if (request_irq(b+idp->irq, + switch_net_irq_handler, IRQF_DISABLED, + idp->name, irq_privatedata) != 0) + printk(KERN_ERR "FEC: Could not alloc %s IRQ(%d)!\n", + idp->name, b+idp->irq); + } + + /* Configure RMII */ +// #if 0 +// // set in u-boot +// MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC & +// MCF_GPIO_PAR_FEC_FEC_MASK) | +// MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL; +// +// MCF_GPIO_PAR_FEC = +// (MCF_GPIO_PAR_FEC & +// MCF_GPIO_PAR_FEC_FEC_MASK) | +// MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL; +// +// MCF_GPIO_SRCR_FEC = 0x0F; +// +// MCF_GPIO_PAR_SIMP0H = +// (MCF_GPIO_PAR_SIMP0H & +// MCF_GPIO_PAR_SIMP0H_DAT_MASK) | +// MCF_GPIO_PAR_SIMP0H_DAT_GPIO; +// +// MCF_GPIO_PDDR_G = +// (MCF_GPIO_PDDR_G & +// MCF_GPIO_PDDR_G4_MASK) | +// MCF_GPIO_PDDR_G4_OUTPUT; +// +// MCF_GPIO_PODR_G = +// (MCF_GPIO_PODR_G & +// MCF_GPIO_PODR_G4_MASK); +// #endif +} + +static void switch_set_mii(struct net_device *dev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + volatile switch_t *fecp; + +// #if 0 +// fecp = fep->hwp; +// +// MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | +// MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD); +// MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | +// MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD); +// /* TCR */ +// MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN; +// MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN; +// /* ECR */ +// #ifdef ENHANCE_BUFFER +// MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588; +// MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588; +// #else /*legac buffer*/ +// MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN; +// MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN; +// #endif +// /* +// * Set MII speed to 2.5 MHz +// */ +// MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; +// MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; +// #endif +// + + writel((MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD), fep->fec[0] + 
FEC_R_CNTRL); + writel((MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD), fep->fec[1] + FEC_R_CNTRL); + + writel(MCF_FEC_TCR_FDEN, fep->fec[0] + FEC_X_CNTRL); + writel(MCF_FEC_TCR_FDEN, fep->fec[1] + FEC_X_CNTRL); + +#ifdef CONFIG_ENHANCED_BD + writel(MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588, fep->fec[0] + FEC_ECNTRL); + writel(MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588, fep->fec[1] + FEC_ECNTRL); +#else + writel(MCF_FEC_ECR_ETHER_EN , fep->fec[0] + FEC_ECNTRL); + writel(MCF_FEC_ECR_ETHER_EN , fep->fec[1] + FEC_ECNTRL); +#endif + writel( MVF_MII_SWITCH_SPEED, fep->fec[0] + FEC_MII_SPEED); + writel( MVF_MII_SWITCH_SPEED, fep->fec[1] + FEC_MII_SPEED); + +} + +static void switch_get_mac(struct net_device *dev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + volatile switch_t *fecp; + unsigned char *iap; + + fecp = fep->hwp; + + if (FEC_FLASHMAC) { + /* + * Get MAC address from FLASH. + * If it is all 1's or 0's, use the default. + */ + iap = FEC_FLASHMAC; + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) + iap = switch_mac_default; + if ((iap[0] == 0xff) && (iap[1] == 0xff) && + (iap[2] == 0xff) && (iap[3] == 0xff) && + (iap[4] == 0xff) && (iap[5] == 0xff)) + iap = switch_mac_default; + + } else { + iap = &switch_mac_addr[0]; + + if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && + (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) + iap = switch_mac_default; + if ((iap[0] == 0xff) && (iap[1] == 0xff) && + (iap[2] == 0xff) && (iap[3] == 0xff) && + (iap[4] == 0xff) && (iap[5] == 0xff)) + iap = switch_mac_default; + } + + memcpy(dev->dev_addr, iap, ETH_ALEN); + /* Adjust MAC if using default MAC address */ + if (iap == switch_mac_default) + dev->dev_addr[ETH_ALEN-1] = switch_mac_default[ETH_ALEN-1] + + fep->index; +} + +static void switch_enable_phy_intr(void) +{ +} + +static void switch_disable_phy_intr(void) +{ +} + +static void 
switch_phy_ack_intr(void) +{ +} + +static void switch_localhw_setup(void) +{ +} + +static void switch_uncache(unsigned long addr) +{ +} + +static void switch_platform_flush_cache(void) +{ +} + +/* + * Define the fixed address of the FEC hardware. + */ +static unsigned int switch_platform_hw[] = { + L2SWITCH_1, + L2SWITCH_ATBL, +}; + +static unsigned int fec_platform_hw[] = { + FEC_ETH0, + FEC_ETH1, +}; + +static struct mvf_switch_platform_data mvf_switch_data = { + .hash_table = 0, + .fec_hw = fec_platform_hw, + .switch_hw = switch_platform_hw, + .request_intrs = switch_request_intrs, + .set_mii = switch_set_mii, + .get_mac = switch_get_mac, + .enable_phy_intr = switch_enable_phy_intr, + .disable_phy_intr = switch_disable_phy_intr, + .phy_ack_intr = switch_phy_ack_intr, + .localhw_setup = switch_localhw_setup, + .uncache = switch_uncache, + .platform_flush_cache = switch_platform_flush_cache, +}; + +// non-used-structure +static struct resource l2switch_resources[] = { + [0] = { + .start = 0xFC0DC000, + .end = 0xFC0DC508, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = (64 + 64 + 64 + 38), + .end = (64 + 64 + 64 + 48), + .flags = IORESOURCE_IRQ, + }, + [2] = { + .start = 0xFC0E0000, + .end = 0xFC0E3FFC, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device l2switch_mvf_device = { + .name = "mvf-switch", + .id = 0, + .resource = l2switch_resources, + .num_resources = ARRAY_SIZE(l2switch_resources), + .dev = { + .platform_data = &mvf_switch_data, + .coherent_dma_mask = ~0, /* $$$ REVISIT */ + } +}; + + +static int __init mvf_switch_dev_init(void) +{ + int retval = 0; + + retval = platform_device_register(&l2switch_mvf_device); + + if (retval < 0) { + printk(KERN_ERR "MVF L2Switch: platform_device_register" + " failed with code=%d\n", retval); + } + + return retval; +} + +static int __init param_switch_addr_setup(char *str) +{ + char *end; + int i; + + for (i = 0; i < 6; i++) { + switch_mac_addr[i] = str ? 
simple_strtoul(str, &end, 16) : 0; + if (str) + str = (*end ) ? end + 1 : end; + } + return 0; +} +__setup("switchaddr=", param_switch_addr_setup); + +arch_initcall(mvf_switch_dev_init); diff --git a/arch/arm/mach-mvf/mm.c b/arch/arm/mach-mvf/mm.c index 62afa4ab92d3..8ad09f606888 100644 --- a/arch/arm/mach-mvf/mm.c +++ b/arch/arm/mach-mvf/mm.c @@ -31,7 +31,7 @@ #include <asm/hardware/cache-l2x0.h> /*! - * This structure defines the Faraday memory map. + * This structure defines the MVF memory map. */ static struct map_desc mvf_io_desc[] __initdata = { imx_map_entry(MVF, AIPS0, MT_DEVICE), diff --git a/arch/arm/mach-mvf/mvf_fec.c b/arch/arm/mach-mvf/mvf_fec.c index 83a92feac2dc..009dbc168a96 100644 --- a/arch/arm/mach-mvf/mvf_fec.c +++ b/arch/arm/mach-mvf/mvf_fec.c @@ -33,12 +33,15 @@ static int fec_get_mac_addr(unsigned char *mac) { unsigned int value; - value = readl(MVF_IO_ADDRESS(MVF_OTP_CTRL_BASE_ADDR) + HW_OCOTP_MACn(0)); +#if 1 + value = 0x01; +#endif + //value = readl(MVF_IO_ADDRESS(MVF_OTP_CTRL_BASE_ADDR) + HW_OCOTP_MACn(0)); mac[5] = value & 0xff; mac[4] = (value >> 8) & 0xff; mac[3] = (value >> 16) & 0xff; mac[2] = (value >> 24) & 0xff; - value = readl(MVF_IO_ADDRESS(MVF_OTP_CTRL_BASE_ADDR) + HW_OCOTP_MACn(1)); + //value = readl(MVF_IO_ADDRESS(MVF_OTP_CTRL_BASE_ADDR) + HW_OCOTP_MACn(1)); mac[1] = value & 0xff; mac[0] = (value >> 8) & 0xff; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fb0f8083b14d..3825ae0e966f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -252,6 +252,13 @@ config MXS_DMA Support the MXS DMA engine. This engine including APBH-DMA and APBX-DMA is integrated into Freescale i.MX23/28 chips. +config MVF_EDMA + bool "MVF eDMA support" + depends on ARCH_MVF + select DMA_ENGINE + help + Support the MVF eDMA engine. 
+ config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0b7457b456a9..06d265f8e770 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -26,3 +26,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o +obj-$(CONFIG_MVF_EDMA) += mvf_edma.o diff --git a/drivers/dma/mvf_edma.c b/drivers/dma/mvf_edma.c new file mode 100644 index 000000000000..37b5ac190b02 --- /dev/null +++ b/drivers/dma/mvf_edma.c @@ -0,0 +1,555 @@ +/* + * Copyright 2011-2012 Freescale Semiconductor, Inc. All Rights Reserved. + * eDMA driver + * + * based on drivers/dma/imx-sdma.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/semaphore.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> + +#include <asm/irq.h> +#include <mach/common.h> +#include <asm/mvf_edma.h> +#include <asm/mvf_edma_regs.h> + +#define MVF_MAX_DMA_ENGINE 1 +#define MVF_EDMA_CHANNELS (MVF_MAX_DMA_ENGINE*MVF_EACH_DMA_CHANNEL) +#define MVF_MAX_XFER_BYTES 2048 + +struct mvf_dma_chan { + struct mvf_dma_engine *mvf_dma; + struct dma_chan chan; + struct dma_async_tx_descriptor desc; + struct tasklet_struct tasklet; + dma_addr_t ccw_phys; + int desc_count; + dma_cookie_t last_completed; + enum dma_status status; + unsigned int flags; + void __iomem *chan_mem_base; +#define MVF_DMA_SG_LOOP (1 << 0) +}; + +struct mvf_dma_engine { + int dev_id; + unsigned int version; + void __iomem *base[MVF_MAX_DMA_ENGINE]; + struct 
clk *clk; + struct dma_device dma_device; + struct device_dma_parameters dma_parms; + struct mvf_dma_chan mvf_chans[MVF_EDMA_CHANNELS]; + int dma_irq[MVF_MAX_DMA_ENGINE]; + int err_irq[MVF_MAX_DMA_ENGINE]; +}; + + +static void mvf_dma_reset_chan(struct mvf_dma_chan *mvf_chan) +{ + void __iomem *base = mvf_chan->chan_mem_base; + int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + + MVF_EDMA_TCD_CSR(base, channel) = 0x0000; + +} + +static void mvf_dma_enable_chan(struct mvf_dma_chan *mvf_chan) +{ + void __iomem *base = mvf_chan->chan_mem_base; + int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + + MVF_EDMA_SERQ(base) = channel; + MVF_EDMA_SSRT(base) = channel; +} + +static void mvf_dma_disable_chan(struct mvf_dma_chan *mvf_chan) +{ + void __iomem *base = mvf_chan->chan_mem_base; + int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + + mvf_chan->status = DMA_SUCCESS; + MVF_EDMA_CEEI(base) = MVF_EDMA_CEEI_CEEI(channel); + +} + +static void mvf_dma_pause_chan(struct mvf_dma_chan *mvf_chan) +{ +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; +// int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + + // + // pause code + // + + mvf_chan->status = DMA_PAUSED; +} + +static void mvf_dma_resume_chan(struct mvf_dma_chan *mvf_chan) +{ +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; +// int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + + // resume code + mvf_chan->status = DMA_IN_PROGRESS; +} + +static dma_cookie_t mvf_dma_assign_cookie(struct mvf_dma_chan *mvf_chan) +{ + dma_cookie_t cookie = mvf_chan->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + mvf_chan->chan.cookie = cookie; + mvf_chan->desc.cookie = cookie; + + return cookie; +} + +static struct mvf_dma_chan *to_mvf_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct mvf_dma_chan, chan); +} + +static dma_cookie_t mvf_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(tx->chan); 
+ dma_cookie_t cookie = mvf_dma_assign_cookie(mvf_chan); + + // tx start + mvf_dma_enable_chan(mvf_chan); + + return cookie; +} + +static void mvf_dma_tasklet(unsigned long data) +{ + struct mvf_dma_chan *mvf_chan = (struct mvf_dma_chan *) data; + + if (mvf_chan->desc.callback) + mvf_chan->desc.callback(mvf_chan->desc.callback_param); +} + +struct mvf_dma_chan * +mvf_find_chan(struct mvf_dma_engine *mvf_dma, int eng_idx, int chan_id) +{ + int i; + + for ( i = 0; i < MVF_EDMA_CHANNELS; i ++){ + if ( mvf_dma->mvf_chans[i].chan.chan_id == (chan_id + (eng_idx*MVF_EACH_DMA_CHANNEL))){ + return &mvf_dma->mvf_chans[i]; + } + } + return 0; +} + +static irqreturn_t mvf_dma_int_handler(int irq, void *dev_id) +{ + int i,int_src,engine; + struct mvf_dma_engine *mvf_dma = dev_id; + struct mvf_dma_chan *mvf_chan; + void __iomem *base; + + base = 0; + + // find normal irq index + for (i = 0; i < MVF_MAX_DMA_ENGINE; i++) { + if ( mvf_dma->dma_irq[i] == irq){ + base = mvf_dma->base[i]; + engine = i; + } + } + + // fail safe + if (!base){ + printk("error irq\n"); + return IRQ_HANDLED; + } + + int_src = MVF_EDMA_INT(base); + for (i = 0; i < MVF_EACH_DMA_CHANNEL; i++) { + if ( int_src & (1 << i)){ + // find chan + mvf_chan = mvf_find_chan(mvf_dma, engine, i); + if (mvf_chan){ + mvf_chan->status = DMA_SUCCESS; + mvf_chan->last_completed = mvf_chan->desc.cookie; + + /* schedule tasklet on this channel */ + tasklet_schedule(&mvf_chan->tasklet); + } + } + } + MVF_EDMA_CINT(base) = MVF_EDMA_CINT_CAIR; + + return IRQ_HANDLED; +} + +static irqreturn_t mvf_dma_err_handler(int irq, void *dev_id) +{ + int i,err,engine; + struct mvf_dma_engine *mvf_dma = dev_id; + struct mvf_dma_chan *mvf_chan; + void __iomem *base; + + base = 0; + + for (i = 0; i < MVF_MAX_DMA_ENGINE; i++) { + if ( mvf_dma->err_irq[i] == irq){ + base = mvf_dma->base[i]; + engine = i; + } + } + + // fail safe + if (!base){ + printk("error irq\n"); + return IRQ_HANDLED; + } + + err = MVF_EDMA_ERR(base); + for (i = 0; i < 
MVF_EACH_DMA_CHANNEL; i++) { + if ( err & (1 << i)){ + mvf_chan = mvf_find_chan(mvf_dma, engine, i); + if (mvf_chan){ + mvf_chan->status = DMA_ERROR; + tasklet_schedule(&mvf_chan->tasklet); + } + } + } + MVF_EDMA_CERR(base) = MVF_EDMA_CERR_CAER; + + return IRQ_HANDLED; +} + + +static int mvf_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; + + mvf_dma_reset_chan(mvf_chan); + + dma_async_tx_descriptor_init(&mvf_chan->desc, chan); + mvf_chan->desc.tx_submit = mvf_dma_tx_submit; + + /* the descriptor is ready */ + async_tx_ack(&mvf_chan->desc); + + return 0; +} + +static void mvf_dma_free_chan_resources(struct dma_chan *chan) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; + + mvf_dma_disable_chan(mvf_chan); + +// free_irq(mvf_chan->chan_irq, mvf_dma); +} + + +void +mvf_edma_set_tcd_params(struct mvf_dma_chan *mvf_chan, u32 source, u32 dest, + u32 attr, u32 soff, u32 nbytes, u32 slast, + u32 citer, u32 biter, u32 doff, u32 dlast_sga, + int major_int, int disable_req) +{ +// struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; + + int channel = mvf_chan->chan.chan_id % MVF_EACH_DMA_CHANNEL; + void __iomem *base = mvf_chan->chan_mem_base; + + MVF_EDMA_TCD_SADDR(base, channel) = source; + MVF_EDMA_TCD_DADDR(base, channel) = dest; + MVF_EDMA_TCD_ATTR(base, channel) = attr; + MVF_EDMA_TCD_SOFF(base, channel) = MVF_EDMA_TCD_SOFF_SOFF(soff); + MVF_EDMA_TCD_NBYTES(base, channel) = MVF_EDMA_TCD_NBYTES_NBYTES(nbytes); + MVF_EDMA_TCD_SLAST(base, channel) = MVF_EDMA_TCD_SLAST_SLAST(slast); + MVF_EDMA_TCD_CITER(base, channel) = MVF_EDMA_TCD_CITER_CITER(citer); + MVF_EDMA_TCD_BITER(base, channel) = MVF_EDMA_TCD_BITER_BITER(biter); + MVF_EDMA_TCD_DOFF(base, channel) = MVF_EDMA_TCD_DOFF_DOFF(doff); + MVF_EDMA_TCD_DLAST_SGA(base, channel) = 
MVF_EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga); + + /* interrupt at the end of major loop */ + if (major_int) + MVF_EDMA_TCD_CSR(base,channel) |= MVF_EDMA_TCD_CSR_INT_MAJOR; + else + MVF_EDMA_TCD_CSR(base,channel) &= ~MVF_EDMA_TCD_CSR_INT_MAJOR; + + /* disable request at the end of major loop of transfer or not */ + if (disable_req) + MVF_EDMA_TCD_CSR(base,channel) |= MVF_EDMA_TCD_CSR_D_REQ; + else + MVF_EDMA_TCD_CSR(base,channel) &= ~MVF_EDMA_TCD_CSR_D_REQ; + + /* enable error interrupt */ + MVF_EDMA_SEEI(base) = MVF_EDMA_SEEI_SEEI(channel); +} + +static struct dma_async_tx_descriptor *mvf_dma_prep_memcpy +(struct dma_chan *chan, + dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); +// struct mvf_dma_engine *mvf_dma = mvf_chan->mvf_dma; +// int channel = mvf_chan->chan.chan_id; + + if (mvf_chan->status == DMA_IN_PROGRESS) + return NULL; + + mvf_chan->status = DMA_IN_PROGRESS; + + // chan_id +#if 1 + mvf_edma_set_tcd_params( + mvf_chan, + src, + dst, + (0 | MVF_EDMA_TCD_ATTR_SSIZE_32BIT | MVF_EDMA_TCD_ATTR_DSIZE_32BIT), + 0x04, + len, 0x0, 1, 1, + 0x04, 0x0, 0x1,0x0); + +#else + // channel control + if ( channel == 10){ + mvf_edma_set_tcd_params( + channel, + src, + dst, + (0 | MVF_EDMA_TCD_ATTR_SSIZE_32BIT | MVF_EDMA_TCD_ATTR_DSIZE_32BIT), + 0x04, + len, 0x0, 1, 1, + 0x04, 0x0, 0x1,0x0); + }else{ + } +#endif + + + mvf_chan->desc_count = 0; + + return &mvf_chan->desc; +} + + +static int mvf_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); + int ret = 0; + + switch (cmd) { + case DMA_TERMINATE_ALL: + mvf_dma_reset_chan(mvf_chan); + mvf_dma_disable_chan(mvf_chan); + break; + case DMA_PAUSE: + mvf_dma_pause_chan(mvf_chan); + break; + case DMA_RESUME: + mvf_dma_resume_chan(mvf_chan); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + +static enum dma_status mvf_dma_tx_status(struct dma_chan *chan, 
+ dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct mvf_dma_chan *mvf_chan = to_mvf_dma_chan(chan); + dma_cookie_t last_used; + + last_used = chan->cookie; + dma_set_tx_state(txstate, mvf_chan->last_completed, last_used, 0); + + return mvf_chan->status; +} + +static void mvf_dma_issue_pending(struct dma_chan *chan) +{ + /* + * Nothing to do. We only have a single descriptor. + */ +} + +static int __init mvf_dma_init(struct mvf_dma_engine *mvf_dma) +{ + int i,ret,cnt; + u32 grp0_pri = MVF_EDMA_CR_GRP0PRI(0x00); + u32 grp1_pri = MVF_EDMA_CR_GRP1PRI(0x01); + + for (i = 0; i < MVF_MAX_DMA_ENGINE; i++) { + MVF_EDMA_CR(mvf_dma->base[i]) = (0 | grp0_pri | grp1_pri); + + // clear every tcd + for (cnt=0; cnt <MVF_EACH_DMA_CHANNEL; cnt ++){ + MVF_EDMA_TCD_CSR(mvf_dma->base[i], cnt) = 0x0000; + } + + ret = request_irq(mvf_dma->dma_irq[i], mvf_dma_int_handler,0, "mvf_dma", mvf_dma); + if( ret) return -EBUSY; + + ret = request_irq(mvf_dma->err_irq[i], mvf_dma_err_handler,0, "mvf_dmaerr", mvf_dma); + if( ret) return -EBUSY; + + } + return 0; + + +#if 0 + int ret; + + ret = clk_prepare_enable(mvf_dma->clk); + if (ret) + return ret; + + ret = mxs_reset_block(mvf_dma->base); + if (ret) + goto err_out; + +err_out: + return ret; +#endif +} + +static int __init mvf_dma_probe(struct platform_device *pdev) +{ + int ret, index,i; + struct mvf_dma_engine *mvf_dma; + struct resource *iores, *irq_res, *errirq_res; + + mvf_dma = kzalloc(sizeof(*mvf_dma), GFP_KERNEL); + if (!mvf_dma) + return -ENOMEM; + + for(i = 0; i< MVF_MAX_DMA_ENGINE; i ++){ + iores = platform_get_resource(pdev, IORESOURCE_MEM, i); + + if (!request_mem_region(iores->start, resource_size(iores), + pdev->name)) { + ret = -EBUSY; + goto err_request_region; + } + + mvf_dma->base[i] = ioremap(iores->start, resource_size(iores)); + if (!mvf_dma->base) { + ret = -ENOMEM; + goto err_ioremap; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i*2+0); + if (!irq_res) + return -ENODEV; + + 
		mvf_dma->dma_irq[i]=irq_res->start;

		/* error interrupts occupy the odd IRQ resource slots */
		errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i*2+1);
		if (!errirq_res)
			return -ENODEV;
		mvf_dma->err_irq[i]=errirq_res->start;
	}

	/* NOTE(review): clock handling disabled — the eDMA clock is
	 * presumably already gated on; confirm on real hardware. */
#if 0
	mvf_dma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mvf_dma->clk)) {
		ret = PTR_ERR(mvf_dma->clk);
		goto err_clk;
	}
#endif

	/* this engine only advertises plain memory-to-memory copies */
	dma_cap_set(DMA_MEMCPY, mvf_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mvf_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MVF_EDMA_CHANNELS; i++) {
		struct mvf_dma_chan *mvf_chan = &mvf_dma->mvf_chans[i];

		/* map the flat channel number onto its engine instance */
		index = i / MVF_EACH_DMA_CHANNEL;

		mvf_chan->mvf_dma = mvf_dma;
		mvf_chan->chan.device = &mvf_dma->dma_device;
		mvf_chan->chan_mem_base = mvf_dma->base[index];

		/* completion callbacks run in per-channel tasklet context */
		tasklet_init(&mvf_chan->tasklet, mvf_dma_tasklet,
			(unsigned long) mvf_chan);


		/* Add the channel to mvf_chan list */
		list_add_tail(&mvf_chan->chan.device_node,
			&mvf_dma->dma_device.channels);
	}

	ret = mvf_dma_init(mvf_dma);
	if (ret)
		goto err_init;

	mvf_dma->dma_device.dev = &pdev->dev;

	mvf_dma->dma_device.dev->dma_parms = &mvf_dma->dma_parms;
	dma_set_max_seg_size(mvf_dma->dma_device.dev, MVF_MAX_XFER_BYTES);

	/* dmaengine callbacks implemented by this driver */
	mvf_dma->dma_device.device_alloc_chan_resources = mvf_dma_alloc_chan_resources;
	mvf_dma->dma_device.device_free_chan_resources = mvf_dma_free_chan_resources;
	mvf_dma->dma_device.device_tx_status = mvf_dma_tx_status;
	mvf_dma->dma_device.device_prep_dma_memcpy = mvf_dma_prep_memcpy;
	mvf_dma->dma_device.device_control = mvf_dma_control;
	mvf_dma->dma_device.device_issue_pending = mvf_dma_issue_pending;

	ret = dma_async_device_register(&mvf_dma->dma_device);
	if (ret) {
		dev_err(mvf_dma->dma_device.dev, "unable to register\n");
		goto err_init;
	}

	dev_info(mvf_dma->dma_device.dev, "initialized\n");

	return 0;

err_init:
	/* NOTE(review): only the last engine's region/mapping is released
	 * by the labels below; earlier iterations leak — TODO fix unwind. */
#if 0
	clk_put(mvf_dma->clk);
err_clk:
	for (i = 0; i < MVF_EDMA_CHANNELS; i++) if (mvf_dma->base[i]) iounmap(mvf_dma->base[i]);
#endif
+err_ioremap: + release_mem_region(iores->start, resource_size(iores)); +err_request_region: + kfree(mvf_dma); + return ret; +} + +static struct platform_driver mvf_dma_driver = { + .driver = { + .name = "mvf-edma", + }, +}; + +static int __init mvf_dma_module_init(void) +{ + return platform_driver_probe(&mvf_dma_driver, mvf_dma_probe); +} +subsys_initcall(mvf_dma_module_init); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index cec12046b8a7..5e82d608dbc5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1951,6 +1951,13 @@ config FEC Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. +config MVF_ETHER_SWITCH + bool "Ethernet switch controller (of MVF CPU)" + depends on ARCH_MVF + help + Say Y here if you want to use the built-in ethernet switch + controller on MVF processor. + config FEC_1588 bool "Enable FEC 1588 timestamping" depends on FEC diff --git a/drivers/net/Makefile b/drivers/net/Makefile index f7e26baa26a3..13f2024a117e 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -137,6 +137,10 @@ endif ifeq ($(CONFIG_ARCH_MX6),y) obj-$(CONFIG_FEC_1588) += fec_1588.o endif +ifeq ($(CONFIG_ARCH_MVF),y) + obj-$(CONFIG_FEC_1588) += fec_1588.o +endif +obj-$(CONFIG_MVF_ETHER_SWITCH) += mvf_switch.o obj-$(CONFIG_68360_ENET) += 68360enet.o obj-$(CONFIG_WD80x3) += wd.o 8390.o obj-$(CONFIG_EL2) += 3c503.o 8390p.o diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 872b7c4c5cc6..750fca841890 100755 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -1427,7 +1427,7 @@ fec_restart(struct net_device *dev, int duplex) fep->ptimer_present = 0; reg = 0x0; } else -#if defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6) +#if defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6) || defined(CONFIG_ARCH_VF600) reg = 0x00000010; #else reg = 0x0; @@ -1457,13 +1457,14 @@ fec_restart(struct net_device *dev, int duplex) /* ENET enable */ val = reg | (0x1 << 1); 
+#ifndef CONFIG_ARCH_VF600 /* if phy work at 1G mode, set ENET RGMII speed to 1G */ if (fep->phy_dev && (fep->phy_dev->supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) && fep->phy_interface == PHY_INTERFACE_MODE_RGMII && fep->phy_dev->speed == SPEED_1000) val |= (0x1 << 5); - +#endif if (cpu_is_mx6()) { /* enable endian swap */ val |= (0x1 << 8); diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 1c4063c05ec2..b62ea23fe9d9 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h @@ -49,7 +49,7 @@ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ /* Define the FEC 1588 registers offset */ -#if defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6) +#if defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6) || defined(CONFIG_ARCH_MVF) #define FEC_ATIME_CTRL 0x400 #define FEC_ATIME 0x404 #define FEC_ATIME_EVT_OFFSET 0x408 @@ -87,7 +87,7 @@ #endif /* CONFIG_M5272 */ -#if (defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6)) \ +#if (defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6)) || defined(CONFIG_ARCH_MVF) \ && defined(CONFIG_FEC_1588) #define CONFIG_ENHANCED_BD #endif diff --git a/drivers/net/fec_1588.c b/drivers/net/fec_1588.c index 43b60941cc99..be65d61f9798 100644 --- a/drivers/net/fec_1588.c +++ b/drivers/net/fec_1588.c @@ -40,7 +40,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ptp_tx_ts_wait); #if defined(CONFIG_ARCH_MX28) static struct fec_ptp_private *ptp_private[2]; -#elif defined(CONFIG_ARCH_MX6) +#elif defined(CONFIG_ARCH_MX6) || defined(CONFIG_ARCH_MVF) static struct fec_ptp_private *ptp_private[1]; #endif @@ -575,7 +575,7 @@ static void fec_handle_ptpdrift(struct ptp_set_comp *comp, struct ptp_time_correct *ptc) { u32 ndrift; - u32 i, adj_inc, adj_period; + u32 i, adj_inc, adj_period=0; u32 tmp_current, tmp_winner; ndrift = comp->drift; diff --git a/drivers/net/mvf_switch.c b/drivers/net/mvf_switch.c new file mode 100644 index 000000000000..b2a943756aa5 --- /dev/null +++ b/drivers/net/mvf_switch.c @@ -0,0 +1,4535 @@ +/* + * L2 switch 
Controller (Etheren switch) driver for MVF family. + * + * based on L2 switch Controller for MCF5441x. + * + * Copyright (C) 2010-2012 Freescale Semiconductor, Inc. All Rights Reserved. + * Shrek Wu (B16972@freescale.com) + * Wang Huan (b18965@freescale.com) + * Jason Jin (Jason.jin@freescale.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> +#include <linux/phy.h> +#include <linux/kthread.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/signal.h> + +#include <asm/irq.h> +#include <asm/pgtable.h> + + +#include <linux/sched.h> +#include <asm/cacheflush.h> +#include <asm/mvf_switch.h> +#include "mvf_switch.h" + +#define SWITCH_MAX_PORTS 1 +#define CONFIG_FEC_SHARED_PHY +#define FEC_PHY + +#if (defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6)) || defined(CONFIG_ARCH_MVF) \ + && defined(CONFIG_FEC_1588) +#define CONFIG_ENHANCED_BD +#endif + +/* Interrupt events/masks. 
+*/ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ + +static int switch_enet_open(struct net_device *dev); +static int switch_enet_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t switch_enet_interrupt(int irq, void *dev_id); +static void switch_enet_tx(struct net_device *dev); +static void switch_enet_rx(struct net_device *dev); +static int switch_enet_close(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void switch_restart(struct net_device *dev, int duplex); +static void switch_stop(struct net_device *dev); +static void switch_set_mac_address(struct net_device *dev); + +#define NMII 20 + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ + (VAL & 0xffff)) + +/* Transmitter timeout. +*/ +#define TX_TIMEOUT (2*HZ) + +/*last read entry from learning interface*/ +eswPortInfo g_info; +/* switch ports status */ +struct port_status ports_link_status; + +/* the user space pid, used to send the link change to user space */ +long user_pid = 1; + +/* ----------------------------------------------------------------*/ +/* + * Calculate Galois Field Arithmetic CRC for Polynom x^8+x^2+x+1. 
+ * It omits the final shift in of 8 zeroes a "normal" CRC would do + * (getting the remainder). + * + * Examples (hexadecimal values):<br> + * 10-11-12-13-14-15 => CRC=0xc2 + * 10-11-cc-dd-ee-00 => CRC=0xe6 + * + * param: pmacaddress + * A 6-byte array with the MAC address. + * The first byte is the first byte transmitted + * return The 8-bit CRC in bits 7:0 + */ +int crc8_calc(unsigned char *pmacaddress) +{ + /* byte index */ + int byt; + /* bit index */ + int bit; + int inval; + int crc; + /* preset */ + crc = 0x12; + for (byt = 0; byt < 6; byt++) { + inval = (((int)pmacaddress[byt]) & 0xff); + /* + * shift bit 0 to bit 8 so all our bits + * travel through bit 8 + * (simplifies below calc) + */ + inval <<= 8; + + for (bit = 0; bit < 8; bit++) { + /* next input bit comes into d7 after shift */ + crc |= inval & 0x100; + if (crc & 0x01) + /* before shift */ + crc ^= 0x1c0; + + crc >>= 1; + inval >>= 1; + } + + } + /* upper bits are clean as we shifted in zeroes! */ + return crc; +} + +void read_atable(struct switch_enet_private *fep, + int index, + unsigned long *read_lo, unsigned long *read_hi) +{ +// unsigned long atable_base = 0xFC0E0000; + unsigned long atable_base = (long)fep->hwentry; + + *read_lo = *((volatile unsigned long *)(atable_base + (index<<3))); + *read_hi = *((volatile unsigned long *)(atable_base + (index<<3) + 4)); +} + +void write_atable(struct switch_enet_private *fep, + int index, + unsigned long write_lo, unsigned long write_hi) +{ +// unsigned long atable_base = 0xFC0E0000; + unsigned long atable_base = (long)fep->hwentry; + + *((volatile unsigned long *)(atable_base + (index<<3))) = write_lo; + *((volatile unsigned long *)(atable_base + (index<<3) + 4)) = write_hi; +} + +/* Check if the Port Info FIFO has data available + * for reading. 1 valid, 0 invalid*/ +int esw_portinfofifo_status(struct switch_enet_private *fep) +{ + volatile switch_t *fecp; + fecp = fep->hwp; + return fecp->ESW_LSR; +} + +/* Initialize the Port Info FIFO. 
*/ +void esw_portinfofifo_initialize( + struct switch_enet_private *fep) +{ + volatile switch_t *fecp; + unsigned long tmp; + fecp = fep->hwp; + + /*disable all learn*/ + fecp->switch_imask &= (~MCF_ESW_IMR_LRN); + /* remove all entries from FIFO */ + while (esw_portinfofifo_status(fep)) { + /* read one data word */ + tmp = fecp->ESW_LREC0; + tmp = fecp->ESW_LREC1; + } + +} + +/* Read one element from the HW receive FIFO (Queue) + * if available and return it. + * return ms_HwPortInfo or null if no data is available + */ +eswPortInfo *esw_portinfofifo_read( + struct switch_enet_private *fep) +{ + volatile switch_t *fecp; + unsigned long tmp; + + fecp = fep->hwp; + /* check learning record valid */ + if (fecp->ESW_LSR == 0) + return NULL; + + /*read word from FIFO*/ + g_info.maclo = fecp->ESW_LREC0; + + /*but verify that we actually did so + * (0=no data available)*/ + if (g_info.maclo == 0) + return NULL; + + /* read 2nd word from FIFO */ + tmp = fecp->ESW_LREC1; + g_info.machi = tmp & 0xffff; + g_info.hash = (tmp >> 16) & 0xff; + g_info.port = (tmp >> 24) & 0xf; + + return &g_info; +} + + +/* + * Clear complete MAC Look Up Table + */ +void esw_clear_atable(struct switch_enet_private *fep) +{ + int index; + for (index = 0; index < 2048; index++) + write_atable(fep, index, 0, 0); +} + +void esw_dump_atable(struct switch_enet_private *fep) +{ + int index; + unsigned long read_lo, read_hi; + for (index = 0; index < 2048; index++) { + read_atable(fep, index, &read_lo, &read_hi); + } + +} + +/* + * pdates MAC address lookup table with a static entry + * Searches if the MAC address is already there in the block and replaces + * the older entry with new one. 
If MAC address is not there then puts a + * new entry in the first empty slot available in the block + * + * mac_addr Pointer to the array containing MAC address to + * be put as static entry + * port Port bitmask numbers to be added in static entry, + * valid values are 1-7 + * priority Priority for the static entry in table + * + * return 0 for a successful update else -1 when no slot available + */ +int esw_update_atable_static(unsigned char *mac_addr, + unsigned int port, unsigned int priority, + struct switch_enet_private *fep) +{ + unsigned long block_index, entry, index_end; + unsigned long read_lo, read_hi; + unsigned long write_lo, write_hi; + + write_lo = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | + (mac_addr[1] << 8) | + mac_addr[0]); + write_hi = (unsigned long)(0 | + (port << AT_SENTRY_PORTMASK_shift) | + (priority << AT_SENTRY_PRIO_shift) | + (AT_ENTRY_TYPE_STATIC << AT_ENTRY_TYPE_shift) | + (AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift) | + (mac_addr[5] << 8) | (mac_addr[4])); + + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr)); + index_end = block_index + ATABLE_ENTRY_PER_SLOT; + /* Now search all the entries in the selected block */ + for (entry = block_index; entry < index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + /* + * MAC address matched, so update the + * existing entry + * even if its a dynamic one + */ + if ((read_lo == write_lo) && + ((read_hi & 0x0000ffff) == + (write_hi & 0x0000ffff))) { + write_atable(fep, entry, write_lo, write_hi); + return 0; + } else if (!(read_hi & (1 << 16))) { + /* + * Fill this empty slot (valid bit zero), + * assuming no holes in the block + */ + write_atable(fep, entry, write_lo, write_hi); + fep->atCurrEntries++; + return 0; + } + } + + /* No space available for this static entry */ + return -1; +} + +/* lookup entry in given Address Table slot and + * insert (learn) it if it is not found. + * return 0 if entry was found and updated. 
+ * 1 if entry was not found and has been inserted (learned). + */ +int esw_update_atable_dynamic(unsigned char *mac_addr, + unsigned int port, unsigned int currTime, + struct switch_enet_private *fep) +{ + unsigned long block_index, entry, index_end; + unsigned long read_lo, read_hi; + unsigned long write_lo, write_hi; + unsigned long tmp; + int time, timeold, indexold; + + /* prepare update port and timestamp */ + write_hi = (mac_addr[5] << 8) | (mac_addr[4]); + write_lo = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | + (mac_addr[1] << 8) | + mac_addr[0]); + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift; + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift; + tmp |= currTime << AT_DENTRY_TIME_shift; + tmp |= port << AT_DENTRY_PORT_shift; + tmp |= write_hi; + + /* + * linear search through all slot + * entries and update if found + */ + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr)); + index_end = block_index + ATABLE_ENTRY_PER_SLOT; + /* Now search all the entries in the selected block */ + for (entry = block_index; entry < index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + + if ((read_lo == write_lo) && + ((read_hi & 0x0000ffff) == + (write_hi & 0x0000ffff))) { + /* found correct address, + * update timestamp. */ + write_atable(fep, entry, write_lo, tmp); + return 0; + } else if (!(read_hi & (1 << 16))) { + /* slot is empty, then use it + * for new entry + * Note: There are no holes, + * therefore cannot be any + * more that need to be compared. + */ + write_atable(fep, entry, write_lo, tmp); + /* statistics (we do it between writing + * .hi an .lo due to + * hardware limitation... + */ + fep->atCurrEntries++; + /* newly inserted */ + return 1; + } + } + + /* + * no more entry available in blockk ... 
+ * overwrite oldest + */ + timeold = 0; + indexold = 0; + for (entry = block_index; entry < index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + time = AT_EXTRACT_TIMESTAMP(read_hi); + time = TIMEDELTA(currTime, time); + if (time > timeold) { + /* is it older ?*/ + timeold = time; + indexold = entry; + } + } + + write_atable(fep, indexold, write_lo, tmp); + /* Statistics (do it inbetween + * writing to .lo and .hi*/ + fep->atBlockOverflows++; + /* newly inserted */ + return 1; + +} + +int esw_update_atable_dynamic1(unsigned long write_lo, + unsigned long write_hi, int block_index, + unsigned int port, unsigned int currTime, + struct switch_enet_private *fep) +{ + unsigned long entry, index_end; + unsigned long read_lo, read_hi; + unsigned long tmp; + int time, timeold, indexold; + + /* prepare update port and timestamp */ + tmp = AT_ENTRY_RECORD_VALID << AT_ENTRY_VALID_shift; + tmp |= AT_ENTRY_TYPE_DYNAMIC << AT_ENTRY_TYPE_shift; + tmp |= currTime << AT_DENTRY_TIME_shift; + tmp |= port << AT_DENTRY_PORT_shift; + tmp |= write_hi; + + /* + * linear search through all slot + * entries and update if found + */ + index_end = block_index + ATABLE_ENTRY_PER_SLOT; + /* Now search all the entries in the selected block */ + for (entry = block_index; entry < index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + if ((read_lo == write_lo) && + ((read_hi & 0x0000ffff) == + (write_hi & 0x0000ffff))) { + /* found correct address, + * update timestamp. */ + write_atable(fep, entry, write_lo, tmp); + return 0; + } else if (!(read_hi & (1 << 16))) { + /* slot is empty, then use it + * for new entry + * Note: There are no holes, + * therefore cannot be any + * more that need to be compared. + */ + write_atable(fep, entry, write_lo, tmp); + /* statistics (we do it between writing + * .hi an .lo due to + * hardware limitation... + */ + fep->atCurrEntries++; + /* newly inserted */ + return 1; + } + } + + /* + * no more entry available in block ... 
+ * overwrite oldest + */ + timeold = 0; + indexold = 0; + for (entry = block_index; entry < index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + time = AT_EXTRACT_TIMESTAMP(read_hi); + time = TIMEDELTA(currTime, time); + if (time > timeold) { + /* is it older ?*/ + timeold = time; + indexold = entry; + } + } + + write_atable(fep, indexold, write_lo, tmp); + /* Statistics (do it inbetween + * writing to .lo and .hi*/ + fep->atBlockOverflows++; + /* newly inserted */ + return 1; +} + +/* + * Delete one dynamic entry within the given block + * of 64-bit entries. + * return number of valid entries in the block after deletion. + */ +int esw_del_atable_dynamic(struct switch_enet_private *fep, + int blockidx, int entryidx) +{ + unsigned long index_start, index_end; + int i; + unsigned long read_lo, read_hi; + + /* the entry to delete */ + index_start = blockidx + entryidx; + /* one after last */ + index_end = blockidx + ATABLE_ENTRY_PER_SLOT; + /* Statistics */ + fep->atCurrEntries--; + + if (entryidx == (ATABLE_ENTRY_PER_SLOT - 1)) { + /* if it is the very last entry, + * just delete it without further efford*/ + write_atable(fep, index_start, 0, 0); + /*number of entries left*/ + i = ATABLE_ENTRY_PER_SLOT - 1; + return i; + } else { + /*not the last in the block, then + * shift all that follow the one + * that is deleted to avoid "holes". + */ + for (i = index_start; i < (index_end - 1); i++) { + read_atable(fep, i + 1, &read_lo, &read_hi); + /* move it down */ + write_atable(fep, i, read_lo, read_hi); + if (!(read_hi & (1 << 16))) { + /* stop if we just copied the last */ + return i - blockidx; + } + } + + /*moved all entries up to the last. 
+ * then set invalid flag in the last*/ + write_atable(fep, index_end - 1, 0, 0); + /* number of valid entries left */ + return i - blockidx; + } + +} + +void esw_atable_dynamicms_del_entries_for_port( + struct switch_enet_private *fep, + int port_index) +{ + unsigned long read_lo, read_hi; + unsigned int port_idx; + int i; + + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) { + read_atable(fep, i, &read_lo, &read_hi); + if (read_hi & (1 << 16)) { + port_idx = AT_EXTRACT_PORT(read_hi); + + if (port_idx == port_index) + write_atable(fep, i, 0, 0); + } + } + +} + +void esw_atable_dynamicms_del_entries_for_other_port( + struct switch_enet_private *fep, + int port_index) +{ + unsigned long read_lo, read_hi; + unsigned int port_idx; + int i; + + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) { + read_atable(fep, i, &read_lo, &read_hi); + if (read_hi & (1 << 16)) { + port_idx = AT_EXTRACT_PORT(read_hi); + + if (port_idx != port_index) + write_atable(fep, i, 0, 0); + } + } + +} + +/* + * Scan one complete block (Slot) for outdated entries and delete them. + * blockidx index of block of entries that should be analyzed. + * return number of deleted entries, 0 if nothing was modified. 
 */
int esw_atable_dynamicms_check_block_age(
	struct switch_enet_private *fep, int blockidx) {

	int i, tm, tdelta;
	int deleted = 0, entries = 0;
	unsigned long read_lo, read_hi;
	/* Scan all entries from last down to
	 * have faster deletion speed if necessary*/
	for (i = (blockidx + ATABLE_ENTRY_PER_SLOT - 1);
		i >= blockidx; i--) {
		read_atable(fep, i, &read_lo, &read_hi);

		if (read_hi & (1 << 16)) {
			/* the entry is valid (bit 16 = record-valid flag) */
			tm = AT_EXTRACT_TIMESTAMP(read_hi);
			tdelta = TIMEDELTA(fep->currTime, tm);
			if (tdelta > fep->ageMax) {
				/* too old: delete; safe while iterating
				 * downwards because deletion only shifts
				 * entries at or above the deleted index */
				esw_del_atable_dynamic(fep,
					blockidx, i-blockidx);
				deleted++;
			} else {
				/* statistics */
				entries++;
			}
		}
	}

	/*update statistics*/
	if (fep->atMaxEntriesPerBlock < entries)
		fep->atMaxEntriesPerBlock = entries;

	return deleted;
}

/* scan the complete address table and find the most current entry.
 * The time of the most current entry then is used as current time
 * for the context structure.
 * In addition the atCurrEntries value is updated as well.
 * return time that has been set in the context.
+ */ +int esw_atable_dynamicms_find_set_latesttime( + struct switch_enet_private *fep) { + + int tm_min, tm_max, tm; + int delta, current_val, i; + unsigned long read_lo, read_hi; + + tm_min = (1 << AT_DENTRY_TIMESTAMP_WIDTH) - 1; + tm_max = 0; + current_val = 0; + + for (i = 0; i < ESW_ATABLE_MEM_NUM_ENTRIES; i++) { + read_atable(fep, i, &read_lo, &read_hi); + if (read_hi & (1 << 16)) { + /*the entry is valid*/ + tm = AT_EXTRACT_TIMESTAMP(read_hi); + if (tm > tm_max) + tm_max = tm; + if (tm < tm_min) + tm_min = tm; + current_val++; + } + } + + delta = TIMEDELTA(tm_max, tm_min); + if (delta < fep->ageMax) { + /*Difference must be in range*/ + fep->currTime = tm_max; + } else { + fep->currTime = tm_min; + } + + fep->atCurrEntries = current_val; + return fep->currTime; +} + +int esw_atable_dynamicms_get_port( + struct switch_enet_private *fep, + unsigned long write_lo, + unsigned long write_hi, + int block_index) +{ + + int i, index_end; + unsigned long read_lo, read_hi, port; + + index_end = block_index + ATABLE_ENTRY_PER_SLOT; + /* Now search all the entries in the selected block */ + for (i = block_index; i < index_end; i++) { + read_atable(fep, i, &read_lo, &read_hi); + + if ((read_lo == write_lo) && + ((read_hi & 0x0000ffff) == + (write_hi & 0x0000ffff))) { + /* found correct address,*/ + if (read_hi & (1 << 16)) { + /*extract the port index from the valid entry*/ + port = AT_EXTRACT_PORT(read_hi); + return port; + } + } + } + + return -1; + +} + +/* Get the port index from the source MAC address + * of the received frame + * @return port index + */ +int esw_atable_dynamicms_get_portindex_from_mac( + struct switch_enet_private *fep, + unsigned char *mac_addr, + unsigned long write_lo, + unsigned long write_hi) +{ + + int blockIdx; + int rc; + /*compute the block index*/ + blockIdx = GET_BLOCK_PTR(crc8_calc(mac_addr)); + /* Get the ingress port index of the received BPDU */ + rc = esw_atable_dynamicms_get_port(fep, + write_lo, write_hi, blockIdx); + + return rc; 
+ +} + +/* dynamicms MAC address table learn and migration*/ +int esw_atable_dynamicms_learn_migration( + struct switch_enet_private *fep, + int currTime) +{ + eswPortInfo *pESWPortInfo; + int index; + int inserted = 0; + + pESWPortInfo = esw_portinfofifo_read(fep); + /* Anything to learn */ + if (pESWPortInfo != 0) { + /*get block index from lookup table*/ + index = GET_BLOCK_PTR(pESWPortInfo->hash); + inserted = esw_update_atable_dynamic1( + pESWPortInfo->maclo, + pESWPortInfo->machi, index, + pESWPortInfo->port, currTime, fep); + } + + return 0; + +} +/* -----------------------------------------------------------------*/ +/* + * esw_forced_forward + * The frame is forwared to the forced destination ports. + * It only replace the MAC lookup function, + * all other filtering(eg.VLAN verification) act as normal + */ +int esw_forced_forward(struct switch_enet_private *fep, + int port1, int port2, int enable) +{ + unsigned long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + + /* Enable Forced forwarding for port num */ + if ((port1 == 1) && (port2 == 1)) + tmp |= MCF_ESW_P0FFEN_FD(3); + else if (port1 == 1) + /*Enable Forced forwarding for port 1 only*/ + tmp |= MCF_ESW_P0FFEN_FD(1); + else if (port2 == 1) + /*Enable Forced forwarding for port 2 only*/ + tmp |= MCF_ESW_P0FFEN_FD(2); + else { + printk(KERN_ERR "%s:do not support " + "the forced forward mode" + "port1 %x port2 %x\n", + __func__, port1, port2); + return -1; + } + + if (enable == 1) + tmp |= MCF_ESW_P0FFEN_FEN; + else if (enable == 0) + tmp &= ~MCF_ESW_P0FFEN_FEN; + else { + printk(KERN_ERR "%s: the enable %x is error\n", + __func__, enable); + return -2; + } + + fecp->ESW_P0FFEN = tmp; + return 0; +} + +void esw_get_forced_forward( + struct switch_enet_private *fep, + unsigned long *ulForceForward) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + *ulForceForward = fecp->ESW_P0FFEN; +} + +void esw_get_port_enable( + struct switch_enet_private *fep, + unsigned long *ulPortEnable) +{ + 
volatile switch_t *fecp; + + fecp = fep->hwp; + *ulPortEnable = fecp->ESW_PER; +} +/* + * enable or disable port n tx or rx + * tx_en 0 disable port n tx + * tx_en 1 enable port n tx + * rx_en 0 disbale port n rx + * rx_en 1 enable port n rx + */ +int esw_port_enable_config(struct switch_enet_private *fep, + int port, int tx_en, int rx_en) +{ + unsigned long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + tmp = fecp->ESW_PER; + if (tx_en == 1) { + if (port == 0) + tmp |= MCF_ESW_PER_TE0; + else if (port == 1) + tmp |= MCF_ESW_PER_TE1; + else if (port == 2) + tmp |= MCF_ESW_PER_TE2; + else { + printk(KERN_ERR "%s:do not support the" + " port %x tx enable\n", + __func__, port); + return -1; + } + } else if (tx_en == 0) { + if (port == 0) + tmp &= (~MCF_ESW_PER_TE0); + else if (port == 1) + tmp &= (~MCF_ESW_PER_TE1); + else if (port == 2) + tmp &= (~MCF_ESW_PER_TE2); + else { + printk(KERN_ERR "%s:do not support " + "the port %x tx disable\n", + __func__, port); + return -2; + } + } else { + printk(KERN_ERR "%s:do not support the port %x" + " tx op value %x\n", + __func__, port, tx_en); + return -3; + } + + if (rx_en == 1) { + if (port == 0) + tmp |= MCF_ESW_PER_RE0; + else if (port == 1) + tmp |= MCF_ESW_PER_RE1; + else if (port == 2) + tmp |= MCF_ESW_PER_RE2; + else { + printk(KERN_ERR "%s:do not support the " + "port %x rx enable\n", + __func__, port); + return -4; + } + } else if (rx_en == 0) { + if (port == 0) + tmp &= (~MCF_ESW_PER_RE0); + else if (port == 1) + tmp &= (~MCF_ESW_PER_RE1); + else if (port == 2) + tmp &= (~MCF_ESW_PER_RE2); + else { + printk(KERN_ERR "%s:do not support the " + "port %x rx disable\n", + __func__, port); + return -5; + } + } else { + printk(KERN_ERR "%s:do not support the port %x" + " rx op value %x\n", + __func__, port, tx_en); + return -6; + } + + fecp->ESW_PER = tmp; + return 0; +} + + +void esw_get_port_broadcast( + struct switch_enet_private *fep, + unsigned long *ulPortBroadcast) +{ + volatile switch_t *fecp; + + 
fecp = fep->hwp; + *ulPortBroadcast = fecp->ESW_DBCR; +} + +int esw_port_broadcast_config( + struct switch_enet_private *fep, + int port, int enable) +{ + unsigned long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port > 2) || (port < 0)) { + printk(KERN_ERR "%s:do not support the port %x" + " default broadcast\n", + __func__, port); + return -1; + } + + tmp = fecp->ESW_DBCR; + if (enable == 1) { + if (port == 0) + tmp |= MCF_ESW_DBCR_P0; + else if (port == 1) + tmp |= MCF_ESW_DBCR_P1; + else if (port == 2) + tmp |= MCF_ESW_DBCR_P2; + } else if (enable == 0) { + if (port == 0) + tmp &= ~MCF_ESW_DBCR_P0; + else if (port == 1) + tmp &= ~MCF_ESW_DBCR_P1; + else if (port == 2) + tmp &= ~MCF_ESW_DBCR_P2; + } + + fecp->ESW_DBCR = tmp; + return 0; +} + + +void esw_get_port_multicast( + struct switch_enet_private *fep, + unsigned long *ulPortMulticast) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + *ulPortMulticast = fecp->ESW_DMCR; +} + +int esw_port_multicast_config( + struct switch_enet_private *fep, + int port, int enable) +{ + unsigned long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port > 2) || (port < 0)) { + printk(KERN_ERR "%s:do not support the port %x" + " default broadcast\n", + __func__, port); + return -1; + } + + tmp = fecp->ESW_DMCR; + if (enable == 1) { + if (port == 0) + tmp |= MCF_ESW_DMCR_P0; + else if (port == 1) + tmp |= MCF_ESW_DMCR_P1; + else if (port == 2) + tmp |= MCF_ESW_DMCR_P2; + } else if (enable == 0) { + if (port == 0) + tmp &= ~MCF_ESW_DMCR_P0; + else if (port == 1) + tmp &= ~MCF_ESW_DMCR_P1; + else if (port == 2) + tmp &= ~MCF_ESW_DMCR_P2; + } + + fecp->ESW_DMCR = tmp; + return 0; +} + + +void esw_get_port_blocking( + struct switch_enet_private *fep, + unsigned long *ulPortBlocking) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + *ulPortBlocking = (fecp->ESW_BKLR & 0x0000000f); +} + +int esw_port_blocking_config( + struct switch_enet_private *fep, + int port, int enable) +{ + unsigned 
long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port > 2) || (port < 0)) { + printk(KERN_ERR "%s:do not support the port %x" + " default broadcast\n", + __func__, port); + return -1; + } + + tmp = fecp->ESW_BKLR; + if (enable == 1) { + if (port == 0) + tmp |= MCF_ESW_BKLR_BE0; + else if (port == 1) + tmp |= MCF_ESW_BKLR_BE1; + else if (port == 2) + tmp |= MCF_ESW_BKLR_BE2; + } else if (enable == 0) { + if (port == 0) + tmp &= ~MCF_ESW_BKLR_BE0; + else if (port == 1) + tmp &= ~MCF_ESW_BKLR_BE1; + else if (port == 2) + tmp &= ~MCF_ESW_BKLR_BE2; + } + + fecp->ESW_BKLR = tmp; + return 0; +} + + +void esw_get_port_learning( + struct switch_enet_private *fep, + unsigned long *ulPortLearning) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + *ulPortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16; +} + +int esw_port_learning_config( + struct switch_enet_private *fep, + int port, int disable) +{ + unsigned long tmp = 0; + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port > 2) || (port < 0)) { + printk(KERN_ERR "%s:do not support the port %x" + " default broadcast\n", + __func__, port); + return -1; + } + + tmp = fecp->ESW_BKLR; + if (disable == 0) { + fep->learning_irqhandle_enable = 0; + if (port == 0) + tmp |= MCF_ESW_BKLR_LD0; + else if (port == 1) + tmp |= MCF_ESW_BKLR_LD1; + else if (port == 2) + tmp |= MCF_ESW_BKLR_LD2; + } else if (disable == 1) { + if (port == 0) + tmp &= ~MCF_ESW_BKLR_LD0; + else if (port == 1) + tmp &= ~MCF_ESW_BKLR_LD1; + else if (port == 2) + tmp &= ~MCF_ESW_BKLR_LD2; + } + + fecp->ESW_BKLR = tmp; + return 0; +} +/*********************************************************************/ +void esw_mac_lookup_table_range(struct switch_enet_private *fep) +{ + int index; + unsigned long read_lo, read_hi; + /* Pointer to switch address look up memory*/ + for (index = 0; index < 2048; index++) + write_atable(fep, index, index, (~index)); + + /* Pointer to switch address look up memory*/ + for (index = 0; index < 2048; 
index++) { + read_atable(fep, index, &read_lo, &read_hi); + if (read_lo != index) { + printk(KERN_ERR "%s:Mismatch at low %d\n", + __func__, index); + return; + } + + if (read_hi != (~index)) { + printk(KERN_ERR "%s:Mismatch at high %d\n", + __func__, index); + return; + } + } +} + + +/* + * Checks IP Snoop options of handling the snooped frame. + * mode 0 : The snooped frame is forward only to management port + * mode 1 : The snooped frame is copy to management port and + * normal forwarding is checked. + * mode 2 : The snooped frame is discarded. + * mode 3 : Disable the ip snoop function + * ip_header_protocol : the IP header protocol field + */ +int esw_ip_snoop_config(struct switch_enet_private *fep, + int mode, unsigned long ip_header_protocol) +{ + volatile switch_t *fecp; + unsigned long tmp = 0, protocol_type = 0; + int num = 0; + + fecp = fep->hwp; + /* Config IP Snooping */ + if (mode == 0) { + /* Enable IP Snooping */ + tmp = MCF_ESW_IPSNP_EN; + tmp |= MCF_ESW_IPSNP_MODE(0);/*For Forward*/ + } else if (mode == 1) { + /* Enable IP Snooping */ + tmp = MCF_ESW_IPSNP_EN; + /*For Forward and copy_to_mangmnt_port*/ + tmp |= MCF_ESW_IPSNP_MODE(1); + } else if (mode == 2) { + /* Enable IP Snooping */ + tmp = MCF_ESW_IPSNP_EN; + tmp |= MCF_ESW_IPSNP_MODE(2);/*discard*/ + } else if (mode == 3) { + /* disable IP Snooping */ + tmp = MCF_ESW_IPSNP_EN; + tmp &= ~MCF_ESW_IPSNP_EN; + } else { + printk(KERN_ERR "%s: the mode %x " + "we do not support\n", __func__, mode); + return -1; + } + + protocol_type = ip_header_protocol; + for (num = 0; num < 8; num++) { + if (protocol_type == + AT_EXTRACT_IP_PROTOCOL(fecp->ESW_IPSNP[num])) { + fecp->ESW_IPSNP[num] = + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type); + break; + } else if (!(fecp->ESW_IPSNP[num])) { + fecp->ESW_IPSNP[num] = + tmp | MCF_ESW_IPSNP_PROTOCOL(protocol_type); + break; + } + } + if (num == 8) { + printk(KERN_INFO "IP snooping table is full\n"); + return 0; + } + + return 0; +} + +void esw_get_ip_snoop_config( 
+ struct switch_enet_private *fep, + unsigned long *ulpESW_IPSNP) +{ + int i; + volatile switch_t *fecp; + + fecp = fep->hwp; + for (i = 0; i < 8; i++) + *(ulpESW_IPSNP + i) = fecp->ESW_IPSNP[i]; +} +/* + * Checks TCP/UDP Port Snoop options of handling the snooped frame. + * mode 0 : The snooped frame is forward only to management port + * mode 1 : The snooped frame is copy to management port and + * normal forwarding is checked. + * mode 2 : The snooped frame is discarded. + * mode 3 : Disable the TCP/UDP port snoop function + * compare_port : port number in the TCP/UDP header + * compare_num 1: TCP/UDP source port number is compared + * compare_num 2: TCP/UDP destination port number is compared + * compare_num 3: TCP/UDP source and destination port number is compared + */ +int esw_tcpudp_port_snoop_config(struct switch_enet_private *fep, + int mode, int compare_port, int compare_num) +{ + volatile switch_t *fecp; + unsigned long tmp; + int num; + + fecp = fep->hwp; + + /* Enable TCP/UDP port Snooping */ + tmp = MCF_ESW_PSNP_EN; + if (mode == 0) + tmp |= MCF_ESW_PSNP_MODE(0);/*For Forward*/ + else if (mode == 1)/*For Forward and copy_to_mangmnt_port*/ + tmp |= MCF_ESW_PSNP_MODE(1); + else if (mode == 2) + tmp |= MCF_ESW_PSNP_MODE(2);/*discard*/ + else if (mode == 3) /*disable the port function*/ + tmp &= (~MCF_ESW_PSNP_EN); + else { + printk(KERN_ERR "%s: the mode %x we do not support\n", + __func__, mode); + return -1; + } + + if (compare_num == 1) + tmp |= MCF_ESW_PSNP_CS; + else if (compare_num == 2) + tmp |= MCF_ESW_PSNP_CD; + else if (compare_num == 3) + tmp |= MCF_ESW_PSNP_CD | MCF_ESW_PSNP_CS; + else { + printk(KERN_ERR "%s: the compare port address %x" + " we do not support\n", + __func__, compare_num); + return -1; + } + + for (num = 0; num < 8; num++) { + if (compare_port == + AT_EXTRACT_TCP_UDP_PORT(fecp->ESW_PSNP[num])) { + fecp->ESW_PSNP[num] = + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port); + break; + } else if (!(fecp->ESW_PSNP[num])) { + 
fecp->ESW_PSNP[num] = + tmp | MCF_ESW_PSNP_PORT_COMPARE(compare_port); + break; + } + } + if (num == 8) { + printk(KERN_INFO "TCP/UDP port snooping table is full\n"); + return 0; + } + + return 0; +} + +void esw_get_tcpudp_port_snoop_config( + struct switch_enet_private *fep, + unsigned long *ulpESW_PSNP) +{ + int i; + volatile switch_t *fecp; + + fecp = fep->hwp; + for (i = 0; i < 8; i++) + *(ulpESW_PSNP + i) = fecp->ESW_PSNP[i]; +} +/*-----------------mirror----------------------------------------*/ +void esw_get_port_mirroring(struct switch_enet_private *fep) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + + printk(KERN_INFO "Mirror Port: %1ld Egress Port Match:%s " + "Ingress Port Match:%s\n", fecp->ESW_MCR & 0xf, + (fecp->ESW_MCR >> 6) & 1 ? "Y" : "N", + (fecp->ESW_MCR >> 5) & 1 ? "Y" : "N"); + + if ((fecp->ESW_MCR >> 6) & 1) + printk(KERN_INFO "Egress Port to be mirrored: Port %ld\n", + fecp->ESW_EGMAP >> 1); + if ((fecp->ESW_MCR >> 5) & 1) + printk(KERN_INFO "Ingress Port to be mirrored: Port %ld\n", + fecp->ESW_INGMAP >> 1); + + printk(KERN_INFO "Egress Des Address Match:%s " + "Egress Src Address Match:%s\n", + (fecp->ESW_MCR >> 10) & 1 ? "Y" : "N", + (fecp->ESW_MCR >> 9) & 1 ? "Y" : "N"); + printk(KERN_INFO "Ingress Des Address Match:%s " + "Ingress Src Address Match:%s\n", + (fecp->ESW_MCR >> 8) & 1 ? "Y" : "N", + (fecp->ESW_MCR >> 7) & 1 ? 
"Y" : "N"); + + if ((fecp->ESW_MCR >> 10) & 1) + printk(KERN_INFO "Egress Des Address to be mirrored: " + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n", + fecp->ESW_ENGDAL & 0xff, (fecp->ESW_ENGDAL >> 8) & 0xff, + (fecp->ESW_ENGDAL >> 16) & 0xff, + (fecp->ESW_ENGDAL >> 24) & 0xff, + fecp->ESW_ENGDAH & 0xff, + (fecp->ESW_ENGDAH >> 8) & 0xff); + if ((fecp->ESW_MCR >> 9) & 1) + printk("Egress Src Address to be mirrored: " + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n", + fecp->ESW_ENGSAL & 0xff, (fecp->ESW_ENGSAL >> 8) & 0xff, + (fecp->ESW_ENGSAL >> 16) & 0xff, + (fecp->ESW_ENGSAL >> 24) & 0xff, + fecp->ESW_ENGSAH & 0xff, + (fecp->ESW_ENGSAH >> 8) & 0xff); + if ((fecp->ESW_MCR >> 8) & 1) + printk("Ingress Des Address to be mirrored: " + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n", + fecp->ESW_INGDAL & 0xff, (fecp->ESW_INGDAL >> 8) & 0xff, + (fecp->ESW_INGDAL >> 16) & 0xff, + (fecp->ESW_INGDAL >> 24) & 0xff, + fecp->ESW_INGDAH & 0xff, + (fecp->ESW_INGDAH >> 8) & 0xff); + if ((fecp->ESW_MCR >> 7) & 1) + printk("Ingress Src Address to be mirrored: " + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx\n", + fecp->ESW_INGSAL & 0xff, (fecp->ESW_INGSAL >> 8) & 0xff, + (fecp->ESW_INGSAL >> 16) & 0xff, + (fecp->ESW_INGSAL >> 24) & 0xff, + fecp->ESW_INGSAH & 0xff, + (fecp->ESW_INGSAH >> 8) & 0xff); +} + +int esw_port_mirroring_config_port_match(struct switch_enet_private *fep, + int mirror_port, int port_match_en, int port) +{ + volatile switch_t *fecp; + unsigned long tmp = 0; + + fecp = fep->hwp; + + tmp = fecp->ESW_MCR; + if (mirror_port != (tmp & 0xf)) + tmp = 0; + + switch (port_match_en) { + case MIRROR_EGRESS_PORT_MATCH: + tmp |= MCF_ESW_MCR_EGMAP; + if (port == 0) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0; + else if (port == 1) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1; + else if (port == 2) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2; + break; + case MIRROR_INGRESS_PORT_MATCH: + tmp |= MCF_ESW_MCR_INGMAP; + if (port == 0) + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0; + else if (port == 1) + fecp->ESW_INGMAP = 
MCF_ESW_INGMAP_ING1; + else if (port == 2) + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2; + break; + default: + tmp = 0; + break; + } + + tmp = tmp & 0x07e0; + if (port_match_en) + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port); + + fecp->ESW_MCR = tmp; + return 0; +} + +int esw_port_mirroring_config(struct switch_enet_private *fep, + int mirror_port, int port, int mirror_enable, + unsigned char *src_mac, unsigned char *des_mac, + int egress_en, int ingress_en, + int egress_mac_src_en, int egress_mac_des_en, + int ingress_mac_src_en, int ingress_mac_des_en) +{ + volatile switch_t *fecp; + unsigned long tmp; + + fecp = fep->hwp; + + /*mirroring config*/ + tmp = 0; + if (egress_en == 1) { + tmp |= MCF_ESW_MCR_EGMAP; + if (port == 0) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG0; + else if (port == 1) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG1; + else if (port == 2) + fecp->ESW_EGMAP = MCF_ESW_EGMAP_EG2; + else { + printk(KERN_ERR "%s: the port %x we do not support\n", + __func__, port); + return -1; + } + } else if (egress_en == 0) { + tmp &= (~MCF_ESW_MCR_EGMAP); + } else { + printk(KERN_ERR "%s: egress_en %x we do not support\n", + __func__, egress_en); + return -1; + } + + if (ingress_en == 1) { + tmp |= MCF_ESW_MCR_INGMAP; + if (port == 0) + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING0; + else if (port == 1) + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING1; + else if (port == 2) + fecp->ESW_INGMAP = MCF_ESW_INGMAP_ING2; + else { + printk(KERN_ERR "%s: the port %x we do not support\n", + __func__, port); + return -1; + } + } else if (ingress_en == 0) { + tmp &= ~MCF_ESW_MCR_INGMAP; + } else{ + printk(KERN_ERR "%s: ingress_en %x we do not support\n", + __func__, ingress_en); + return -1; + } + + if (egress_mac_src_en == 1) { + tmp |= MCF_ESW_MCR_EGSA; + fecp->ESW_ENGSAH = (src_mac[5] << 8) | (src_mac[4]); + fecp->ESW_ENGSAL = (unsigned long)((src_mac[3] << 24) | + (src_mac[2] << 16) | + (src_mac[1] << 8) | + src_mac[0]); + } else if (egress_mac_src_en == 0) { + tmp &= ~MCF_ESW_MCR_EGSA; + 
} else { + printk(KERN_ERR "%s: egress_mac_src_en %x we do not support\n", + __func__, egress_mac_src_en); + return -1; + } + + if (egress_mac_des_en == 1) { + tmp |= MCF_ESW_MCR_EGDA; + fecp->ESW_ENGDAH = (des_mac[5] << 8) | (des_mac[4]); + fecp->ESW_ENGDAL = (unsigned long)((des_mac[3] << 24) | + (des_mac[2] << 16) | + (des_mac[1] << 8) | + des_mac[0]); + } else if (egress_mac_des_en == 0) { + tmp &= ~MCF_ESW_MCR_EGDA; + } else { + printk(KERN_ERR "%s: egress_mac_des_en %x we do not support\n", + __func__, egress_mac_des_en); + return -1; + } + + if (ingress_mac_src_en == 1) { + tmp |= MCF_ESW_MCR_INGSA; + fecp->ESW_INGSAH = (src_mac[5] << 8) | (src_mac[4]); + fecp->ESW_INGSAL = (unsigned long)((src_mac[3] << 24) | + (src_mac[2] << 16) | + (src_mac[1] << 8) | + src_mac[0]); + } else if (ingress_mac_src_en == 0) { + tmp &= ~MCF_ESW_MCR_INGSA; + } else { + printk(KERN_ERR "%s: ingress_mac_src_en %x we do not support\n", + __func__, ingress_mac_src_en); + return -1; + } + + if (ingress_mac_des_en == 1) { + tmp |= MCF_ESW_MCR_INGDA; + fecp->ESW_INGDAH = (des_mac[5] << 8) | (des_mac[4]); + fecp->ESW_INGDAL = (unsigned long)((des_mac[3] << 24) | + (des_mac[2] << 16) | + (des_mac[1] << 8) | + des_mac[0]); + } else if (ingress_mac_des_en == 0) { + tmp &= ~MCF_ESW_MCR_INGDA; + } else { + printk(KERN_ERR "%s: ingress_mac_des_en %x we do not support\n", + __func__, ingress_mac_des_en); + return -1; + } + + if (mirror_enable == 1) + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port); + else if (mirror_enable == 0) + tmp &= ~MCF_ESW_MCR_MEN; + else + printk(KERN_ERR "%s: the mirror enable %x is error\n", + __func__, mirror_enable); + + + fecp->ESW_MCR = tmp; + return 0; +} + +int esw_port_mirroring_config_addr_match(struct switch_enet_private *fep, + int mirror_port, int addr_match_enable, unsigned char *mac_addr) +{ + volatile switch_t *fecp; + unsigned long tmp = 0; + + fecp = fep->hwp; + + tmp = fecp->ESW_MCR; + if (mirror_port != (tmp & 0xf)) + tmp = 0; + + switch 
(addr_match_enable) { + case MIRROR_EGRESS_SOURCE_MATCH: + tmp |= MCF_ESW_MCR_EGSA; + fecp->ESW_ENGSAH = (mac_addr[5] << 8) | (mac_addr[4]); + fecp->ESW_ENGSAL = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]); + break; + case MIRROR_INGRESS_SOURCE_MATCH: + tmp |= MCF_ESW_MCR_INGSA; + fecp->ESW_INGSAH = (mac_addr[5] << 8) | (mac_addr[4]); + fecp->ESW_INGSAL = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]); + break; + case MIRROR_EGRESS_DESTINATION_MATCH: + tmp |= MCF_ESW_MCR_EGDA; + fecp->ESW_ENGDAH = (mac_addr[5] << 8) | (mac_addr[4]); + fecp->ESW_ENGDAL = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]); + break; + case MIRROR_INGRESS_DESTINATION_MATCH: + tmp |= MCF_ESW_MCR_INGDA; + fecp->ESW_INGDAH = (mac_addr[5] << 8) | (mac_addr[4]); + fecp->ESW_INGDAL = (unsigned long)((mac_addr[3] << 24) | + (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]); + break; + default: + tmp = 0; + break; + } + + tmp = tmp & 0x07e0; + if (addr_match_enable) + tmp |= MCF_ESW_MCR_MEN | MCF_ESW_MCR_PORT(mirror_port); + + fecp->ESW_MCR = tmp; + return 0; +} + +void esw_get_vlan_verification( + struct switch_enet_private *fep, + unsigned long *ulValue) +{ + volatile switch_t *fecp; + fecp = fep->hwp; + *ulValue = fecp->ESW_VLANV; +} + +int esw_set_vlan_verification( + struct switch_enet_private *fep, int port, + int vlan_domain_verify_en, + int vlan_discard_unknown_en) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + if ((port < 0) || (port > 2)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + if (vlan_domain_verify_en == 1) { + if (port == 0) + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV0; + else if (port == 1) + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV1; + else if (port == 2) + fecp->ESW_VLANV |= MCF_ESW_VLANV_VV2; + } else if (vlan_domain_verify_en == 0) { + if (port == 0) + fecp->ESW_VLANV &= 
~MCF_ESW_VLANV_VV0; + else if (port == 1) + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV1; + else if (port == 2) + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_VV2; + } else { + printk(KERN_INFO "%s: donot support " + "vlan_domain_verify %x\n", + __func__, vlan_domain_verify_en); + return -2; + } + + if (vlan_discard_unknown_en == 1) { + if (port == 0) + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU0; + else if (port == 1) + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU1; + else if (port == 2) + fecp->ESW_VLANV |= MCF_ESW_VLANV_DU2; + } else if (vlan_discard_unknown_en == 0) { + if (port == 0) + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU0; + else if (port == 1) + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU1; + else if (port == 2) + fecp->ESW_VLANV &= ~MCF_ESW_VLANV_DU2; + } else { + printk(KERN_INFO "%s: donot support " + "vlan_discard_unknown %x\n", + __func__, vlan_discard_unknown_en); + return -3; + } + + return 0; +} + +void esw_get_vlan_resolution_table( + struct switch_enet_private *fep, + struct eswVlanTableItem *tableaddr) +{ + volatile switch_t *fecp; + int vnum = 0; + int i; + + fecp = fep->hwp; + for (i = 0; i < 32; i++) { + if (fecp->ESW_VRES[i]) { + tableaddr->table[i].port_vlanid = + fecp->ESW_VRES[i] >> 3; + tableaddr->table[i].vlan_domain_port = + fecp->ESW_VRES[i] & 7; + vnum++; + } + } + tableaddr->valid_num = vnum; +} + +int esw_set_vlan_id(struct switch_enet_private *fep, + unsigned long configData) +{ + volatile switch_t *fecp; + int i; + + fecp = fep->hwp; + + for (i = 0; i < 32; i++) { + if (fecp->ESW_VRES[i] == 0) { + fecp->ESW_VRES[i] = MCF_ESW_VRES_VLANID(configData); + return 0; + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) { + printk(KERN_INFO "The VLAN already exists\n"); + return 0; + } + } + + printk(KERN_INFO "The VLAN can't create, because VLAN table is full\n"); + return 0; +} + +int esw_set_vlan_id_cleared(struct switch_enet_private *fep, + unsigned long configData) +{ + volatile switch_t *fecp; + int i; + + fecp = fep->hwp; + + for (i = 0; i < 32; i++) { + if 
(((fecp->ESW_VRES[i] >> 3) & 0xfff) == configData) { + fecp->ESW_VRES[i] = 0; + break; + } + } + return 0; +} + +int esw_set_port_in_vlan_id(struct switch_enet_private *fep, + eswIoctlVlanResoultionTable configData) +{ + volatile switch_t *fecp; + int i; + int lastnum = 0; + + fecp = fep->hwp; + + for (i = 0; i < 32; i++) { + if (fecp->ESW_VRES[i] == 0) { + lastnum = i; + break; + } else if (((fecp->ESW_VRES[i] >> 3) & 0xfff) == + configData.port_vlanid) { + /* update the port members of this vlan */ + fecp->ESW_VRES[i] |= 1 << configData.vlan_domain_port; + return 0; + } + } + /* creat a new vlan in vlan table */ + fecp->ESW_VRES[lastnum] = MCF_ESW_VRES_VLANID(configData.port_vlanid) | + (1 << configData.vlan_domain_port); + return 0; +} + +int esw_set_vlan_resolution_table( + struct switch_enet_private *fep, + unsigned short port_vlanid, + int vlan_domain_num, + int vlan_domain_port) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + if ((vlan_domain_num < 0) + || (vlan_domain_num > 31)) { + printk(KERN_ERR "%s: do not support the " + "vlan_domain_num %d\n", + __func__, vlan_domain_num); + return -1; + } + + if ((vlan_domain_port < 0) + || (vlan_domain_port > 7)) { + printk(KERN_ERR "%s: do not support the " + "vlan_domain_port %d\n", + __func__, vlan_domain_port); + return -2; + } + + fecp->ESW_VRES[vlan_domain_num] = + MCF_ESW_VRES_VLANID(port_vlanid) + | vlan_domain_port; + + return 0; +} + +void esw_get_vlan_input_config( + struct switch_enet_private *fep, + eswIoctlVlanInputStatus *pVlanInputConfig) +{ + volatile switch_t *fecp; + int i; + + fecp = fep->hwp; + for (i = 0; i < 3; i++) + pVlanInputConfig->ESW_PID[i] = fecp->ESW_PID[i]; + + pVlanInputConfig->ESW_VLANV = fecp->ESW_VLANV; + pVlanInputConfig->ESW_VIMSEL = fecp->ESW_VIMSEL; + pVlanInputConfig->ESW_VIMEN = fecp->ESW_VIMEN; + + for (i = 0; i < 32; i++) + pVlanInputConfig->ESW_VRES[i] = fecp->ESW_VRES[i]; +} + + +int esw_vlan_input_process(struct switch_enet_private *fep, + int port, int mode, 
unsigned short port_vlanid)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+
+	if ((mode < 0) || (mode > 5)) {
+		printk(KERN_ERR "%s: do not support the"
+			" VLAN input processing mode %d\n",
+			__func__, mode);
+		return -1;
+	}
+
+	if ((port < 0) || (port > 3)) {
+		printk(KERN_ERR "%s: do not support the port %d\n",
+			__func__, port);
+		return -2;
+	}
+
+	fecp->ESW_PID[port] = MCF_ESW_PID_VLANID(port_vlanid);
+	if (port == 0) {
+		if (mode == 4)
+			fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN0;
+		else
+			fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN0;
+
+		fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM0(3);
+		fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM0(mode);
+	} else if (port == 1) {
+		if (mode == 4)
+			fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN1;
+		else
+			fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN1;
+
+		fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM1(3);
+		fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM1(mode);
+	} else if (port == 2) {
+		if (mode == 4)
+			fecp->ESW_VIMEN &= ~MCF_ESW_VIMEN_EN2;
+		else
+			fecp->ESW_VIMEN |= MCF_ESW_VIMEN_EN2;
+
+		fecp->ESW_VIMSEL &= ~MCF_ESW_VIMSEL_IM2(3);
+		fecp->ESW_VIMSEL |= MCF_ESW_VIMSEL_IM2(mode);
+	} else {
+		printk(KERN_ERR "%s: do not support the port %d\n",
+			__func__, port);
+		return -2;
+	}
+
+	return 0;
+}
+
+void esw_get_vlan_output_config(struct switch_enet_private *fep,
+	unsigned long *ulVlanOutputConfig)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+	*ulVlanOutputConfig = fecp->ESW_VOMSEL;
+}
+
+int esw_vlan_output_process(struct switch_enet_private *fep,
+	int port, int mode)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+
+	if ((port < 0) || (port > 2)) {
+		printk(KERN_ERR "%s: do not support the port %d\n",
+			__func__, port);
+		return -1;
+	}
+
+	if (port == 0) {
+		fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM0(3);
+		fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM0(mode);
+	} else if (port == 1) {
+		fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM1(3);
+		fecp->ESW_VOMSEL |= MCF_ESW_VOMSEL_OM1(mode);
+	} else if (port == 2) {
+		fecp->ESW_VOMSEL &= ~MCF_ESW_VOMSEL_OM2(3);
+		fecp->ESW_VOMSEL
|= MCF_ESW_VOMSEL_OM2(mode); + } else { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + return 0; +} + +/*------------frame calssify and priority resolution------------*/ +/*vlan priority lookup*/ +int esw_framecalssify_vlan_priority_lookup( + struct switch_enet_private *fep, + int port, int func_enable, + int vlan_pri_table_num, + int vlan_pri_table_value) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port < 0) || (port > 3)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + if (func_enable == 0) { + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_VLAN; + printk(KERN_ERR "%s: disable port %d VLAN priority " + "lookup function\n", __func__, port); + return 0; + } + + if ((vlan_pri_table_num < 0) || (vlan_pri_table_num > 7)) { + printk(KERN_ERR "%s: do not support the priority %d\n", + __func__, vlan_pri_table_num); + return -1; + } + + fecp->ESW_PVRES[port] |= ((vlan_pri_table_value & 0x3) + << (vlan_pri_table_num*3)); + /* enable port VLAN priority lookup function*/ + fecp->ESW_PRES[port] |= MCF_ESW_PRES_VLAN; + return 0; +} + +int esw_framecalssify_ip_priority_lookup( + struct switch_enet_private *fep, + int port, int func_enable, int ipv4_en, + int ip_priority_num, + int ip_priority_value) +{ + volatile switch_t *fecp; + unsigned long tmp = 0, tmp_prio = 0; + + fecp = fep->hwp; + + if ((port < 0) || (port > 3)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + if (func_enable == 0) { + fecp->ESW_PRES[port] &= ~MCF_ESW_PRES_IP; + printk(KERN_ERR "%s: disable port %d ip priority " + "lookup function\n", __func__, port); + return 0; + } + + /* IPV4 priority 64 entry table lookup*/ + /* IPv4 head 6 bit TOS field*/ + if (ipv4_en == 1) { + if ((ip_priority_num < 0) || (ip_priority_num > 63)) { + printk(KERN_ERR "%s: do not support the table entry %d\n", + __func__, ip_priority_num); + return -2; + } + } else { /* IPV6 priority 
256 entry table lookup*/ + /* IPv6 head 8 bit COS field*/ + if ((ip_priority_num < 0) || (ip_priority_num > 255)) { + printk(KERN_ERR "%s: do not support the table entry %d\n", + __func__, ip_priority_num); + return -3; + } + } + + /* IP priority table lookup : address*/ + tmp = MCF_ESW_IPRES_ADDRESS(ip_priority_num); + /* IP priority table lookup : ipv4sel*/ + if (ipv4_en == 1) + tmp = tmp | MCF_ESW_IPRES_IPV4SEL; + /* IP priority table lookup : priority*/ + if (port == 0) + tmp |= MCF_ESW_IPRES_PRI0(ip_priority_value); + else if (port == 1) + tmp |= MCF_ESW_IPRES_PRI1(ip_priority_value); + else if (port == 2) + tmp |= MCF_ESW_IPRES_PRI2(ip_priority_value); + + /* configure*/ + fecp->ESW_IPRES = MCF_ESW_IPRES_READ | + MCF_ESW_IPRES_ADDRESS(ip_priority_num); + tmp_prio = fecp->ESW_IPRES; + + fecp->ESW_IPRES = tmp | tmp_prio; + + fecp->ESW_IPRES = MCF_ESW_IPRES_READ | + MCF_ESW_IPRES_ADDRESS(ip_priority_num); + tmp_prio = fecp->ESW_IPRES; + + /* enable port IP priority lookup function*/ + fecp->ESW_PRES[port] |= MCF_ESW_PRES_IP; + return 0; +} + +int esw_framecalssify_mac_priority_lookup( + struct switch_enet_private *fep, + int port) +{ + volatile switch_t *fecp; + + if ((port < 0) || (port > 3)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + fecp = fep->hwp; + fecp->ESW_PRES[port] |= MCF_ESW_PRES_MAC; + + return 0; +} + +int esw_frame_calssify_priority_init( + struct switch_enet_private *fep, + int port, unsigned char priority_value) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + + if ((port < 0) || (port > 3)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + /*disable all priority lookup function*/ + fecp->ESW_PRES[port] = 0; + fecp->ESW_PRES[port] = MCF_ESW_PRES_DFLT_PRI(priority_value & 0x7); + + return 0; +} + +/*---------------------------------------------------------------------------*/ +int esw_get_statistics_status( + struct switch_enet_private *fep, + 
esw_statistics_status *pStatistics) +{ + volatile switch_t *fecp; + fecp = fep->hwp; + + pStatistics->ESW_DISCN = fecp->ESW_DISCN; + pStatistics->ESW_DISCB = fecp->ESW_DISCB; + pStatistics->ESW_NDISCN = fecp->ESW_NDISCN; + pStatistics->ESW_NDISCB = fecp->ESW_NDISCB; + return 0; +} + +int esw_get_port_statistics_status( + struct switch_enet_private *fep, + int port, + esw_port_statistics_status *pPortStatistics) +{ + volatile switch_t *fecp; + + if ((port < 0) || (port > 3)) { + printk(KERN_ERR "%s: do not support the port %d\n", + __func__, port); + return -1; + } + + fecp = fep->hwp; + + pPortStatistics->MCF_ESW_POQC = + fecp->port_statistics_status[port].MCF_ESW_POQC; + pPortStatistics->MCF_ESW_PMVID = + fecp->port_statistics_status[port].MCF_ESW_PMVID; + pPortStatistics->MCF_ESW_PMVTAG = + fecp->port_statistics_status[port].MCF_ESW_PMVTAG; + pPortStatistics->MCF_ESW_PBL = + fecp->port_statistics_status[port].MCF_ESW_PBL; + return 0; +} +/*----------------------------------------------------------------------*/ +int esw_get_output_queue_status( + struct switch_enet_private *fep, + esw_output_queue_status *pOutputQueue) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + pOutputQueue->ESW_MMSR = fecp->ESW_MMSR; + pOutputQueue->ESW_LMT = fecp->ESW_LMT; + pOutputQueue->ESW_LFC = fecp->ESW_LFC; + pOutputQueue->ESW_IOSR = fecp->ESW_IOSR; + pOutputQueue->ESW_PCSR = fecp->ESW_PCSR; + pOutputQueue->ESW_QWT = fecp->ESW_QWT; + pOutputQueue->ESW_P0BCT = fecp->ESW_P0BCT; + return 0; +} + +/* set output queue memory status and configure*/ +int esw_set_output_queue_memory( + struct switch_enet_private *fep, + int fun_num, + esw_output_queue_status *pOutputQueue) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + + if (fun_num == 1) { + /* memory manager status*/ + fecp->ESW_MMSR = pOutputQueue->ESW_MMSR; + } else if (fun_num == 2) { + /*low memory threshold*/ + fecp->ESW_LMT = pOutputQueue->ESW_LMT; + } else if (fun_num == 3) { + /*lowest number of free cells*/ + 
fecp->ESW_LFC = pOutputQueue->ESW_LFC;
+	} else if (fun_num == 4) {
+		/*queue weights*/
+		fecp->ESW_QWT = pOutputQueue->ESW_QWT;
+	} else if (fun_num == 5) {
+		/*port 0 backpressure congenstion thresled*/
+		fecp->ESW_P0BCT = pOutputQueue->ESW_P0BCT;
+	} else {
+		printk(KERN_ERR "%s: do not support the cmd %x\n",
+			__func__, fun_num);
+		return -1;
+	}
+	return 0;
+}
+/*--------------------------------------------------------------------*/
+int esw_get_irq_status(
+	struct switch_enet_private *fep,
+	eswIoctlIrqStatus *pIrqStatus)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+	pIrqStatus->isr = fecp->switch_ievent;
+	pIrqStatus->imr = fecp->switch_imask;
+	pIrqStatus->rx_buf_pointer = fecp->fec_r_des_start;
+	pIrqStatus->tx_buf_pointer = fecp->fec_x_des_start;
+	pIrqStatus->rx_max_size = fecp->fec_r_buff_size;
+	pIrqStatus->rx_buf_active = fecp->fec_r_des_active;
+	pIrqStatus->tx_buf_active = fecp->fec_x_des_active;
+	return 0;
+}
+
+int esw_set_irq_mask(
+	struct switch_enet_private *fep,
+	unsigned long mask, int enable)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+
+	if (enable == 1)
+		fecp->switch_imask |= mask;
+	else if (enable == 0)
+		fecp->switch_imask &= (~mask);
+	else {
+		printk(KERN_INFO "%s: enable %x is error value\n",
+			__func__, enable);
+		return -1;
+	}
+	return 0;
+}
+
+void esw_clear_irq_event(
+	struct switch_enet_private *fep,
+	unsigned long mask)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+	fecp->switch_ievent |= mask;
+}
+
+void esw_get_switch_mode(
+	struct switch_enet_private *fep,
+	unsigned long *ulModeConfig)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+	*ulModeConfig = fecp->ESW_MODE;
+}
+
+void esw_switch_mode_configure(
+	struct switch_enet_private *fep,
+	unsigned long configure)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+	fecp->ESW_MODE |= configure;
+}
+
+
+void esw_get_bridge_port(
+	struct switch_enet_private *fep,
+	unsigned long *ulBMPConfig)
+{
+	volatile switch_t *fecp;
+
+	fecp = fep->hwp;
+ *ulBMPConfig = fecp->ESW_BMPC; +} + +void esw_bridge_port_configure( + struct switch_enet_private *fep, + unsigned long configure) +{ + volatile switch_t *fecp; + + fecp = fep->hwp; + fecp->ESW_BMPC = configure; +} + +int esw_get_port_all_status(struct switch_enet_private *fep, + unsigned char portnum, struct port_all_status *port_alstatus) +{ + volatile switch_t *fecp; + unsigned long PortBlocking; + unsigned long PortLearning; + unsigned long VlanVerify; + unsigned long DiscardUnknown; + unsigned long MultiReso; + unsigned long BroadReso; + unsigned long FTransmit; + unsigned long FReceive; + + fecp = fep->hwp; + PortBlocking = fecp->ESW_BKLR & 0x0000000f; + PortLearning = (fecp->ESW_BKLR & 0x000f0000) >> 16; + VlanVerify = fecp->ESW_VLANV & 0x0000000f; + DiscardUnknown = (fecp->ESW_VLANV & 0x000f0000) >> 16; + MultiReso = fecp->ESW_DMCR & 0x0000000f; + BroadReso = fecp->ESW_DBCR & 0x0000000f; + FTransmit = fecp->ESW_PER & 0x0000000f; + FReceive = (fecp->ESW_PER & 0x000f0000) >> 16; + + switch (portnum) { + case 0: + port_alstatus->link_status = 1; + port_alstatus->block_status = PortBlocking & 1; + port_alstatus->learn_status = PortLearning & 1; + port_alstatus->vlan_verify = VlanVerify & 1; + port_alstatus->discard_unknown = DiscardUnknown & 1; + port_alstatus->multi_reso = MultiReso & 1; + port_alstatus->broad_reso = BroadReso & 1; + port_alstatus->ftransmit = FTransmit & 1; + port_alstatus->freceive = FReceive & 1; + break; + case 1: + port_alstatus->link_status = + ports_link_status.port1_link_status; + port_alstatus->block_status = (PortBlocking >> 1) & 1; + port_alstatus->learn_status = (PortLearning >> 1) & 1; + port_alstatus->vlan_verify = (VlanVerify >> 1) & 1; + port_alstatus->discard_unknown = (DiscardUnknown >> 1) & 1; + port_alstatus->multi_reso = (MultiReso >> 1) & 1; + port_alstatus->broad_reso = (BroadReso >> 1) & 1; + port_alstatus->ftransmit = (FTransmit >> 1) & 1; + port_alstatus->freceive = (FReceive >> 1) & 1; + break; + case 2: + 
port_alstatus->link_status = + ports_link_status.port2_link_status; + port_alstatus->block_status = (PortBlocking >> 2) & 1; + port_alstatus->learn_status = (PortLearning >> 2) & 1; + port_alstatus->vlan_verify = (VlanVerify >> 2) & 1; + port_alstatus->discard_unknown = (DiscardUnknown >> 2) & 1; + port_alstatus->multi_reso = (MultiReso >> 2) & 1; + port_alstatus->broad_reso = (BroadReso >> 2) & 1; + port_alstatus->ftransmit = (FTransmit >> 2) & 1; + port_alstatus->freceive = (FReceive >> 2) & 1; + break; + default: + printk(KERN_ERR "%s:do not support the port %d", + __func__, portnum); + break; + } + return 0; +} + +int esw_atable_get_entry_port_number(struct switch_enet_private *fep, + unsigned char *mac_addr, unsigned char *port) +{ + int block_index, block_index_end, entry; + unsigned long read_lo, read_hi; + unsigned long mac_addr_lo, mac_addr_hi; + + mac_addr_lo = (unsigned long)((mac_addr[3]<<24) | (mac_addr[2]<<16) | + (mac_addr[1]<<8) | mac_addr[0]); + mac_addr_hi = (unsigned long)((mac_addr[5]<<8) | (mac_addr[4])); + + block_index = GET_BLOCK_PTR(crc8_calc(mac_addr)); + block_index_end = block_index + ATABLE_ENTRY_PER_SLOT; + + /* now search all the entries in the selected block */ + for (entry = block_index; entry < block_index_end; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + if ((read_lo == mac_addr_lo) && + ((read_hi & 0x0000ffff) == + (mac_addr_hi & 0x0000ffff))) { + /* found the correct address */ + if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17)))) + *port = AT_EXTRACT_PORT(read_hi); + break; + } else + *port = -1; + } + + return 0; +} + +int esw_get_mac_address_lookup_table(struct switch_enet_private *fep, + unsigned long *tableaddr, unsigned long *dnum, unsigned long *snum) +{ + unsigned long read_lo, read_hi; + unsigned long entry; + unsigned long dennum = 0; + unsigned long sennum = 0; + + for (entry = 0; entry < ESW_ATABLE_MEM_NUM_ENTRIES; entry++) { + read_atable(fep, entry, &read_lo, &read_hi); + if ((read_hi & (1 << 17)) 
&& (read_hi & (1 << 16))) { + /* static entry */ + *(tableaddr + (2047 - sennum) * 11) = entry; + *(tableaddr + (2047 - sennum) * 11 + 2) = + read_lo & 0x000000ff; + *(tableaddr + (2047 - sennum) * 11 + 3) = + (read_lo & 0x0000ff00) >> 8; + *(tableaddr + (2047 - sennum) * 11 + 4) = + (read_lo & 0x00ff0000) >> 16; + *(tableaddr + (2047 - sennum) * 11 + 5) = + (read_lo & 0xff000000) >> 24; + *(tableaddr + (2047 - sennum) * 11 + 6) = + read_hi & 0x000000ff; + *(tableaddr + (2047 - sennum) * 11 + 7) = + (read_hi & 0x0000ff00) >> 8; + *(tableaddr + (2047 - sennum) * 11 + 8) = + AT_EXTRACT_PORTMASK(read_hi); + *(tableaddr + (2047 - sennum) * 11 + 9) = + AT_EXTRACT_PRIO(read_hi); + sennum++; + } else if ((read_hi & (1 << 16)) && (!(read_hi & (1 << 17)))) { + /* dynamic entry */ + *(tableaddr + dennum * 11) = entry; + *(tableaddr + dennum * 11 + 2) = read_lo & 0xff; + *(tableaddr + dennum * 11 + 3) = + (read_lo & 0x0000ff00) >> 8; + *(tableaddr + dennum * 11 + 4) = + (read_lo & 0x00ff0000) >> 16; + *(tableaddr + dennum * 11 + 5) = + (read_lo & 0xff000000) >> 24; + *(tableaddr + dennum * 11 + 6) = read_hi & 0xff; + *(tableaddr + dennum * 11 + 7) = + (read_hi & 0x0000ff00) >> 8; + *(tableaddr + dennum * 11 + 8) = + AT_EXTRACT_PORT(read_hi); + *(tableaddr + dennum * 11 + 9) = + AT_EXTRACT_TIMESTAMP(read_hi); + dennum++; + } + } + + *dnum = dennum; + *snum = sennum; + return 0; +} + +/*----------------------------------------------------------------------------*/ +/* The timer should create an interrupt every 4 seconds*/ +static void l2switch_aging_timer(unsigned long data) +{ + struct switch_enet_private *fep; + + fep = (struct switch_enet_private *)data; + + if (fep) { + TIMEINCREMENT(fep->currTime); + fep->timeChanged++; + } + + mod_timer(&fep->timer_aging, jiffies + LEARNING_AGING_TIMER); +} + +/* ----------------------------------------------------------------------- */ +void esw_check_rxb_txb_interrupt(struct switch_enet_private *fep) +{ + volatile switch_t *fecp; + fecp 
= fep->hwp; + + /*Enable Forced forwarding for port 1*/ + fecp->ESW_P0FFEN = MCF_ESW_P0FFEN_FEN | + MCF_ESW_P0FFEN_FD(1); + /*Disable learning for all ports*/ + fecp->switch_imask = MCF_ESW_IMR_TXB | MCF_ESW_IMR_TXF | + MCF_ESW_IMR_RXB | MCF_ESW_IMR_RXF; +} + +/*----------------------------------------------------------------*/ +static int switch_enet_learning(void *arg) +{ + struct switch_enet_private *fep = arg; + volatile switch_t *fecp; + + fecp = fep->hwp; + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + + /* check learning record valid */ + if (fecp->ESW_LSR) + esw_atable_dynamicms_learn_migration(fep, + fep->currTime); + else + schedule_timeout(HZ/100); + } + + return 0; +} + +static int switch_enet_ioctl( + struct net_device *dev, + struct ifreq *ifr, int cmd) +{ + struct switch_enet_private *fep; + volatile switch_t *fecp; + int ret = 0; + + fep = netdev_priv(dev); + fecp = (volatile switch_t *)dev->base_addr; + + switch (cmd) { + /*------------------------------------------------------------*/ + case ESW_SET_PORTENABLE_CONF: + { + eswIoctlPortEnableConfig configData; + ret = copy_from_user(&configData, + ifr->ifr_data, + sizeof(eswIoctlPortEnableConfig)); + if (ret) + return -EFAULT; + + ret = esw_port_enable_config(fep, + configData.port, + configData.tx_enable, + configData.rx_enable); + } + break; + case ESW_SET_BROADCAST_CONF: + { + eswIoctlPortConfig configData; + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortConfig)); + if (ret) + return -EFAULT; + + ret = esw_port_broadcast_config(fep, + configData.port, configData.enable); + } + break; + + case ESW_SET_MULTICAST_CONF: + { + eswIoctlPortConfig configData; + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortConfig)); + if (ret) + return -EFAULT; + + ret = esw_port_multicast_config(fep, + configData.port, configData.enable); + } + break; + + case ESW_SET_BLOCKING_CONF: + { + eswIoctlPortConfig configData; + ret = 
copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortConfig)); + + if (ret) + return -EFAULT; + + ret = esw_port_blocking_config(fep, + configData.port, configData.enable); + } + break; + + case ESW_SET_LEARNING_CONF: + { + eswIoctlPortConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortConfig)); + if (ret) + return -EFAULT; + + ret = esw_port_learning_config(fep, + configData.port, configData.enable); + } + break; + + case ESW_SET_PORT_ENTRY_EMPTY: + { + unsigned long portnum; + + ret = copy_from_user(&portnum, + ifr->ifr_data, sizeof(portnum)); + if (ret) + return -EFAULT; + esw_atable_dynamicms_del_entries_for_port(fep, portnum); + } + break; + + case ESW_SET_OTHER_PORT_ENTRY_EMPTY: + { + unsigned long portnum; + + ret = copy_from_user(&portnum, + ifr->ifr_data, sizeof(portnum)); + if (ret) + return -EFAULT; + + esw_atable_dynamicms_del_entries_for_other_port(fep, portnum); + } + break; + + case ESW_SET_IP_SNOOP_CONF: + { + eswIoctlIpsnoopConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlIpsnoopConfig)); + if (ret) + return -EFAULT; + + ret = esw_ip_snoop_config(fep, configData.mode, + configData.ip_header_protocol); + } + break; + + case ESW_SET_PORT_SNOOP_CONF: + { + eswIoctlPortsnoopConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortsnoopConfig)); + if (ret) + return -EFAULT; + + ret = esw_tcpudp_port_snoop_config(fep, configData.mode, + configData.compare_port, + configData.compare_num); + } + break; + + case ESW_SET_PORT_MIRROR_CONF_PORT_MATCH: + { + struct eswIoctlMirrorCfgPortMatch configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(configData)); + if (ret) + return -EFAULT; + ret = esw_port_mirroring_config_port_match(fep, + configData.mirror_port, configData.port_match_en, + configData.port); + } + break; + + case ESW_SET_PORT_MIRROR_CONF: + { + eswIoctlPortMirrorConfig configData; + + ret = 
copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPortMirrorConfig)); + if (ret) + return -EFAULT; + + ret = esw_port_mirroring_config(fep, + configData.mirror_port, configData.port, + configData.mirror_enable, + configData.src_mac, configData.des_mac, + configData.egress_en, configData.ingress_en, + configData.egress_mac_src_en, + configData.egress_mac_des_en, + configData.ingress_mac_src_en, + configData.ingress_mac_des_en); + } + break; + + case ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH: + { + struct eswIoctlMirrorCfgAddrMatch configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(configData)); + if (ret) + return -EFAULT; + + ret = esw_port_mirroring_config_addr_match(fep, + configData.mirror_port, configData.addr_match_en, + configData.mac_addr); + } + break; + + case ESW_SET_PIRORITY_VLAN: + { + eswIoctlPriorityVlanConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPriorityVlanConfig)); + if (ret) + return -EFAULT; + + ret = esw_framecalssify_vlan_priority_lookup(fep, + configData.port, configData.func_enable, + configData.vlan_pri_table_num, + configData.vlan_pri_table_value); + } + break; + + case ESW_SET_PIRORITY_IP: + { + eswIoctlPriorityIPConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPriorityIPConfig)); + if (ret) + return -EFAULT; + + ret = esw_framecalssify_ip_priority_lookup(fep, + configData.port, configData.func_enable, + configData.ipv4_en, configData.ip_priority_num, + configData.ip_priority_value); + } + break; + + case ESW_SET_PIRORITY_MAC: + { + eswIoctlPriorityMacConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlPriorityMacConfig)); + if (ret) + return -EFAULT; + + ret = esw_framecalssify_mac_priority_lookup(fep, + configData.port); + } + break; + + case ESW_SET_PIRORITY_DEFAULT: + { + eswIoctlPriorityDefaultConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, 
sizeof(eswIoctlPriorityDefaultConfig)); + if (ret) + return -EFAULT; + + ret = esw_frame_calssify_priority_init(fep, + configData.port, configData.priority_value); + } + break; + + case ESW_SET_P0_FORCED_FORWARD: + { + eswIoctlP0ForcedForwardConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlP0ForcedForwardConfig)); + if (ret) + return -EFAULT; + + ret = esw_forced_forward(fep, configData.port1, + configData.port2, configData.enable); + } + break; + + case ESW_SET_BRIDGE_CONFIG: + { + unsigned long configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(unsigned long)); + if (ret) + return -EFAULT; + + esw_bridge_port_configure(fep, configData); + } + break; + + case ESW_SET_SWITCH_MODE: + { + unsigned long configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(unsigned long)); + if (ret) + return -EFAULT; + + esw_switch_mode_configure(fep, configData); + } + break; + + case ESW_SET_OUTPUT_QUEUE_MEMORY: + { + eswIoctlOutputQueue configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlOutputQueue)); + if (ret) + return -EFAULT; + + ret = esw_set_output_queue_memory(fep, + configData.fun_num, &configData.sOutputQueue); + } + break; + + case ESW_SET_VLAN_OUTPUT_PROCESS: + { + eswIoctlVlanOutputConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlVlanOutputConfig)); + if (ret) + return -EFAULT; + + ret = esw_vlan_output_process(fep, + configData.port, configData.mode); + } + break; + + case ESW_SET_VLAN_INPUT_PROCESS: + { + eswIoctlVlanInputConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, + sizeof(eswIoctlVlanInputConfig)); + if (ret) + return -EFAULT; + + ret = esw_vlan_input_process(fep, configData.port, + configData.mode, configData.port_vlanid); + } + break; + + case ESW_SET_VLAN_DOMAIN_VERIFICATION: + { + eswIoctlVlanVerificationConfig configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, + 
sizeof(eswIoctlVlanVerificationConfig)); + if (ret) + return -EFAULT; + + ret = esw_set_vlan_verification( + fep, configData.port, + configData.vlan_domain_verify_en, + configData.vlan_discard_unknown_en); + } + break; + + case ESW_SET_VLAN_RESOLUTION_TABLE: + { + eswIoctlVlanResoultionTable configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, + sizeof(eswIoctlVlanResoultionTable)); + if (ret) + return -EFAULT; + + ret = esw_set_vlan_resolution_table( + fep, configData.port_vlanid, + configData.vlan_domain_num, + configData.vlan_domain_port); + + } + break; + + case ESW_SET_VLAN_ID: + { + unsigned long configData; + ret = copy_from_user(&configData, ifr->ifr_data, + sizeof(configData)); + if (ret) + return -EFAULT; + + ret = esw_set_vlan_id(fep, configData); + } + break; + + case ESW_SET_VLAN_ID_CLEARED: + { + unsigned long configData; + ret = copy_from_user(&configData, ifr->ifr_data, + sizeof(configData)); + if (ret) + return -EFAULT; + + ret = esw_set_vlan_id_cleared(fep, configData); + } + break; + + case ESW_SET_PORT_IN_VLAN_ID: + { + eswIoctlVlanResoultionTable configData; + + ret = copy_from_user(&configData, ifr->ifr_data, + sizeof(configData)); + if (ret) + return -EFAULT; + + ret = esw_set_port_in_vlan_id(fep, configData); + } + break; + + /*--------------------------------------------------------------------*/ + case ESW_UPDATE_STATIC_MACTABLE: + { + eswIoctlUpdateStaticMACtable configData; + + ret = copy_from_user(&configData, + ifr->ifr_data, sizeof(eswIoctlUpdateStaticMACtable)); + if (ret) + return -EFAULT; + + ret = esw_update_atable_static(configData.mac_addr, + configData.port, configData.priority, fep); + } + break; + + case ESW_CLEAR_ALL_MACTABLE: + { + esw_clear_atable(fep); + } + break; + + /*-------------------get----------------------------------------------*/ + case ESW_GET_STATISTICS_STATUS: + { + esw_statistics_status Statistics; + esw_port_statistics_status PortSta; + int i; + + ret = esw_get_statistics_status(fep, 
&Statistics); + if (ret != 0) { + printk(KERN_ERR "%s: cmd %x fail\n", __func__, cmd); + return -1; + } + printk(KERN_INFO "DISCN : %10ld DISCB : %10ld\n", + Statistics.ESW_DISCN, Statistics.ESW_DISCB); + printk(KERN_INFO "NDISCN: %10ld NDISCB: %10ld\n", + Statistics.ESW_NDISCN, Statistics.ESW_NDISCB); + + for (i = 0; i < 3; i++) { + ret = esw_get_port_statistics_status(fep, i, + &PortSta); + if (ret != 0) { + printk(KERN_ERR "%s: cmd %x fail\n", + __func__, cmd); + return -1; + } + printk(KERN_INFO "port %d: POQC : %ld\n", + i, PortSta.MCF_ESW_POQC); + printk(KERN_INFO " PMVID : %ld\n", + PortSta.MCF_ESW_PMVID); + printk(KERN_INFO " PMVTAG: %ld\n", + PortSta.MCF_ESW_PMVTAG); + printk(KERN_INFO " PBL : %ld\n", + PortSta.MCF_ESW_PBL); + } + } + break; + + case ESW_GET_LEARNING_CONF: + { + unsigned long PortLearning; + + esw_get_port_learning(fep, &PortLearning); + ret = copy_to_user(ifr->ifr_data, &PortLearning, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_BLOCKING_CONF: + { + unsigned long PortBlocking; + + esw_get_port_blocking(fep, &PortBlocking); + ret = copy_to_user(ifr->ifr_data, &PortBlocking, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_MULTICAST_CONF: + { + unsigned long PortMulticast; + + esw_get_port_multicast(fep, &PortMulticast); + ret = copy_to_user(ifr->ifr_data, &PortMulticast, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_BROADCAST_CONF: + { + unsigned long PortBroadcast; + + esw_get_port_broadcast(fep, &PortBroadcast); + ret = copy_to_user(ifr->ifr_data, &PortBroadcast, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_PORTENABLE_CONF: + { + unsigned long PortEnable; + + esw_get_port_enable(fep, &PortEnable); + ret = copy_to_user(ifr->ifr_data, &PortEnable, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_IP_SNOOP_CONF: + { + unsigned long ESW_IPSNP[8]; + int i; 
+ + esw_get_ip_snoop_config(fep, (unsigned long *)ESW_IPSNP); + printk(KERN_INFO "IP Protocol Mode Type\n"); + for (i = 0; i < 8; i++) { + if (ESW_IPSNP[i] != 0) + printk(KERN_INFO "%3ld " + "%1ld %s\n", + (ESW_IPSNP[i] >> 8) & 0xff, + (ESW_IPSNP[i] >> 1) & 3, + ESW_IPSNP[i] & 1 ? "Active" : + "Inactive"); + } + } + break; + + case ESW_GET_PORT_SNOOP_CONF: + { + unsigned long ESW_PSNP[8]; + int i; + + esw_get_tcpudp_port_snoop_config(fep, + (unsigned long *)ESW_PSNP); + printk(KERN_INFO "TCP/UDP Port SrcCompare DesCompare " + "Mode Type\n"); + for (i = 0; i < 8; i++) { + if (ESW_PSNP[i] != 0) + printk(KERN_INFO "%5ld %s " + "%s %1ld %s\n", + (ESW_PSNP[i] >> 16) & 0xffff, + (ESW_PSNP[i] >> 4) & 1 ? "Y" : "N", + (ESW_PSNP[i] >> 3) & 1 ? "Y" : "N", + (ESW_PSNP[i] >> 1) & 3, + ESW_PSNP[i] & 1 ? "Active" : + "Inactive"); + } + } + break; + + case ESW_GET_PORT_MIRROR_CONF: + esw_get_port_mirroring(fep); + break; + + case ESW_GET_P0_FORCED_FORWARD: + { + unsigned long ForceForward; + + esw_get_forced_forward(fep, &ForceForward); + ret = copy_to_user(ifr->ifr_data, &ForceForward, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_SWITCH_MODE: + { + unsigned long Config; + + esw_get_switch_mode(fep, &Config); + ret = copy_to_user(ifr->ifr_data, &Config, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_BRIDGE_CONFIG: + { + unsigned long Config; + + esw_get_bridge_port(fep, &Config); + ret = copy_to_user(ifr->ifr_data, &Config, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + case ESW_GET_OUTPUT_QUEUE_STATUS: + { + esw_output_queue_status Config; + esw_get_output_queue_status(fep, + &Config); + ret = copy_to_user(ifr->ifr_data, &Config, + sizeof(esw_output_queue_status)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_VLAN_OUTPUT_PROCESS: + { + unsigned long Config; + int tmp; + int i; + + esw_get_vlan_output_config(fep, &Config); + + for (i = 0; i < 3; i++) { + tmp = 
(Config >> (i << 1)) & 3; + + if (tmp != 0) + printk(KERN_INFO "port %d: vlan output " + "manipulation enable (mode %d)\n", + i, tmp); + else + printk(KERN_INFO "port %d: vlan output " + "manipulation disable\n", i); + } + } + break; + + case ESW_GET_VLAN_INPUT_PROCESS: + { + eswIoctlVlanInputStatus Config; + int i; + + esw_get_vlan_input_config(fep, &Config); + + for (i = 0; i < 3; i++) { + if (((Config.ESW_VIMEN >> i) & 1) == 0) + printk(KERN_INFO "port %d: vlan input " + "manipulation disable\n", i); + else + printk("port %d: vlan input manipulation enable" + " (mode %ld, vlan id %ld)\n", i, + (((Config.ESW_VIMSEL >> (i << 1)) & 3) + + 1), Config.ESW_PID[i]); + } + } + break; + + case ESW_GET_VLAN_RESOLUTION_TABLE: + { + struct eswVlanTableItem vtableitem; + unsigned char tmp0, tmp1, tmp2; + int i; + + esw_get_vlan_resolution_table(fep, &vtableitem); + + printk(KERN_INFO "VLAN Name VLAN Id Ports\n"); + for (i = 0; i < vtableitem.valid_num; i++) { + tmp0 = vtableitem.table[i].vlan_domain_port & 1; + tmp1 = (vtableitem.table[i].vlan_domain_port >> 1) & 1; + tmp2 = (vtableitem.table[i].vlan_domain_port >> 2) & 1; + printk(KERN_INFO "%2d %4d %s%s%s\n", + i, vtableitem.table[i].port_vlanid, + tmp0 ? "0 " : "", tmp1 ? "1 " : "", + tmp2 ? 
"2" : ""); + } + } + break; + + case ESW_GET_VLAN_DOMAIN_VERIFICATION: + { + unsigned long Config; + + esw_get_vlan_verification(fep, &Config); + ret = copy_to_user(ifr->ifr_data, &Config, + sizeof(unsigned long)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_ENTRY_PORT_NUMBER: + { + unsigned char mac_addr[6]; + unsigned char portnum; + + ret = copy_from_user(mac_addr, + ifr->ifr_data, sizeof(mac_addr)); + if (ret) + return -EFAULT; + + ret = esw_atable_get_entry_port_number(fep, mac_addr, + &portnum); + + ret = copy_to_user(ifr->ifr_data, &portnum, + sizeof(unsigned char)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_LOOKUP_TABLE: + { + unsigned long *ConfigData; + unsigned long dennum, sennum; + int i; + int tmp; + + ConfigData = kmalloc(sizeof(struct eswAddrTableEntryExample) * + ESW_ATABLE_MEM_NUM_ENTRIES, GFP_KERNEL); + ret = esw_get_mac_address_lookup_table(fep, ConfigData, + &dennum, &sennum); + printk(KERN_INFO "Dynamic entries number: %ld \n", dennum); + printk(KERN_INFO "Static entries number: %ld \n", sennum); + printk(KERN_INFO "Type MAC address Port Timestamp\n"); + for (i = 0; i < dennum; i++) { + printk(KERN_INFO "dynamic " + "%02lx-%02lx-%02lx-%02lx-%02lx-%02lx " + "%01lx %4ld\n", *(ConfigData + i * 11 + 2), + *(ConfigData + i * 11 + 3), + *(ConfigData + i * 11 + 4), + *(ConfigData + i * 11 + 5), + *(ConfigData + i * 11 + 6), + *(ConfigData + i * 11 + 7), + *(ConfigData + i * 11 + 8), + *(ConfigData + i * 11 + 9)); + } + + if (sennum != 0) + printk(KERN_INFO "Type MAC address" + " Port Priority\n"); + + for (i = 0; i < sennum; i++) { + printk(KERN_INFO "static %02lx-%02lx-%02lx-%02lx" + "-%02lx-%02lx ", + *(ConfigData + (2047 - i) * 11 + 2), + *(ConfigData + (2047 - i) * 11 + 3), + *(ConfigData + (2047 - i) * 11 + 4), + *(ConfigData + (2047 - i) * 11 + 5), + *(ConfigData + (2047 - i) * 11 + 6), + *(ConfigData + (2047 - i) * 11 + 7)); + + tmp = *(ConfigData + (2047 - i) * 11 + 8); + if ((tmp == 0) || (tmp == 2) || (tmp == 
4)) + printk("%01x ", tmp >> 1); + else if (tmp == 3) + printk("0,1 "); + else if (tmp == 5) + printk("0,2 "); + else if (tmp == 6) + printk("1,2 "); + + printk("%4ld\n", *(ConfigData + (2047 - i) * 11 + 9)); + } + kfree(ConfigData); + } + break; + + case ESW_GET_PORT_STATUS: + { + unsigned long PortBlocking; + + esw_get_port_blocking(fep, &PortBlocking); + + ports_link_status.port0_block_status = PortBlocking & 1; + ports_link_status.port1_block_status = (PortBlocking >> 1) & 1; + ports_link_status.port2_block_status = PortBlocking >> 2; + + ret = copy_to_user(ifr->ifr_data, &ports_link_status, + sizeof(ports_link_status)); + if (ret) + return -EFAULT; + } + break; + + case ESW_GET_PORT_ALL_STATUS: + { + unsigned char portnum; + struct port_all_status port_astatus; + + ret = copy_from_user(&portnum, + ifr->ifr_data, sizeof(portnum)); + if (ret) + return -EFAULT; + + esw_get_port_all_status(fep, portnum, &port_astatus); + printk("Port %d status:\n", portnum); + printk(KERN_INFO "Link:%-4s Blocking:%1s Learning:%1s\n", + port_astatus.link_status ? "Up" : "Down", + port_astatus.block_status ? "Y" : "N", + port_astatus.learn_status ? "N" : "Y"); + printk(KERN_INFO "VLAN Verify:%1s Discard Unknown:%1s Multicast Res:%1s\n", + port_astatus.vlan_verify ? "Y" : "N", + port_astatus.discard_unknown ? "Y" : "N", + port_astatus.multi_reso ? "Y" : "N"); + printk(KERN_INFO "Broadcast Res:%1s Transmit:%-7s Receive:%7s\n", + port_astatus.broad_reso ? "Y" : "N", + port_astatus.ftransmit ? "Enable" : "Disable", + port_astatus.freceive ? 
"Enable" : "Disable"); + + } + break; + + case ESW_GET_USER_PID: + { + long get_pid = 0; + ret = copy_from_user(&get_pid, + ifr->ifr_data, sizeof(get_pid)); + + if (ret) + return -EFAULT; + user_pid = get_pid; + } + break; + /*------------------------------------------------------------------*/ + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int +switch_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct switch_enet_private *fep; + volatile switch_t *fecp; + cbd_t *bdp; + unsigned short status; + unsigned long flags; + + fep = netdev_priv(dev); + fecp = (switch_t *)fep->hwp; + + spin_lock_irqsave(&fep->hw_lock, flags); + /* Fill in a Tx ring entry */ + bdp = fep->cur_tx; + + status = bdp->cbd_sc; + + /* Clear all of the status flags. + */ + status &= ~BD_ENET_TX_STATS; + + /* Set buffer length and buffer pointer. + */ + bdp->cbd_bufaddr = __pa(skb->data); + bdp->cbd_datlen = skb->len; + + /* + * On some FEC implementations data must be aligned on + * 4-byte boundaries. Use bounce buffers to copy data + * and get it aligned. Ugh. + */ + if (bdp->cbd_bufaddr & 0x3) { + unsigned int index1; + index1 = bdp - fep->tx_bd_base; + + memcpy(fep->tx_bounce[index1], + (void *)skb->data, bdp->cbd_datlen); + bdp->cbd_bufaddr = __pa(fep->tx_bounce[index1]); + } + + /* Save skb pointer. */ + fep->tx_skbuff[fep->skb_cur] = skb; + + dev->stats.tx_bytes += skb->len; + fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; + + /* Push the data cache so the CPM does not get stale memory + * data. + */ +// flush_dcache_range((unsigned long)skb->data, + flush_kernel_vmap_range(skb->data, + (unsigned long)skb->data + skb->len); + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. 
+ */ + + status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR + | BD_ENET_TX_LAST | BD_ENET_TX_TC); + bdp->cbd_sc = status; + dev->trans_start = jiffies; + + /* Trigger transmission start */ + fecp->fec_x_des_active = MCF_ESW_TDAR_X_DES_ACTIVE; + + /* If this was the last BD in the ring, + * start at the beginning again.*/ + if (status & BD_ENET_TX_WRAP) + bdp = fep->tx_bd_base; + else + bdp++; + + if (bdp == fep->dirty_tx) { + fep->tx_full = 1; + netif_stop_queue(dev); + printk(KERN_ERR "%s: net stop\n", __func__); + } + + fep->cur_tx = (cbd_t *)bdp; + + spin_unlock_irqrestore(&fep->hw_lock, flags); + + return 0; +} + +static void +switch_timeout(struct net_device *dev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out.\n", dev->name); + dev->stats.tx_errors++; + switch_restart(dev, fep->full_duplex); + netif_wake_queue(dev); +} + +/* The interrupt handler. + * This is called from the MPC core interrupt. + */ +static irqreturn_t +switch_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + volatile switch_t *fecp; + uint int_events; + irqreturn_t ret = IRQ_NONE; + + fecp = (switch_t *)dev->base_addr; + + /* Get the interrupt events that caused us to be here. + */ + do { + int_events = fecp->switch_ievent; + fecp->switch_ievent = int_events; + /* Handle receive event in its own function. */ + + /* Transmit OK, or non-fatal error. Update the buffer + descriptors. Switch handles all errors, we just discover + them as part of the transmit process. 
+ */ + if (int_events & MCF_ESW_ISR_OD0) + ret = IRQ_HANDLED; + + if (int_events & MCF_ESW_ISR_OD1) + ret = IRQ_HANDLED; + + if (int_events & MCF_ESW_ISR_OD2) + ret = IRQ_HANDLED; + + if (int_events & MCF_ESW_ISR_RXB) + ret = IRQ_HANDLED; + + if (int_events & MCF_ESW_ISR_RXF) { + ret = IRQ_HANDLED; + switch_enet_rx(dev); + } + + if (int_events & MCF_ESW_ISR_TXB) + ret = IRQ_HANDLED; + + if (int_events & MCF_ESW_ISR_TXF) { + ret = IRQ_HANDLED; + switch_enet_tx(dev); + } + + } while (int_events); + + return ret; +} + +static void +switch_enet_tx(struct net_device *dev) +{ + struct switch_enet_private *fep; + cbd_t *bdp; + unsigned short status; + struct sk_buff *skb; + + fep = netdev_priv(dev); + spin_lock_irq(&fep->hw_lock); + bdp = fep->dirty_tx; + + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { + if (bdp == fep->cur_tx && fep->tx_full == 0) + break; + + skb = fep->tx_skbuff[fep->skb_dirty]; + /* Check for errors. */ + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + dev->stats.tx_errors++; + if (status & BD_ENET_TX_HB) /* No heartbeat */ + dev->stats.tx_heartbeat_errors++; + if (status & BD_ENET_TX_LC) /* Late collision */ + dev->stats.tx_window_errors++; + if (status & BD_ENET_TX_RL) /* Retrans limit */ + dev->stats.tx_aborted_errors++; + if (status & BD_ENET_TX_UN) /* Underrun */ + dev->stats.tx_fifo_errors++; + if (status & BD_ENET_TX_CSL) /* Carrier lost */ + dev->stats.tx_carrier_errors++; + } else { + dev->stats.tx_packets++; + } + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (status & BD_ENET_TX_DEF) + dev->stats.collisions++; + + /* Free the sk buffer associated with this last transmit. + */ + dev_kfree_skb_any(skb); + fep->tx_skbuff[fep->skb_dirty] = NULL; + fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; + + /* Update pointer to next buffer descriptor to be transmitted. 
+ */ + if (status & BD_ENET_TX_WRAP) + bdp = fep->tx_bd_base; + else + bdp++; + + /* Since we have freed up a buffer, the ring is no longer + * full. + */ + if (fep->tx_full) { + fep->tx_full = 0; + printk(KERN_ERR "%s: tx full is zero\n", __func__); + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } + } + fep->dirty_tx = (cbd_t *)bdp; + spin_unlock_irq(&fep->hw_lock); +} + + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. + */ +static void +switch_enet_rx(struct net_device *dev) +{ + struct switch_enet_private *fep; + volatile switch_t *fecp; + cbd_t *bdp; + unsigned short status; + struct sk_buff *skb; + ushort pkt_len; + __u8 *data; + + fep = netdev_priv(dev); + /*fecp = (volatile switch_t *)dev->base_addr;*/ + fecp = (volatile switch_t *)fep->hwp; + + spin_lock_irq(&fep->hw_lock); + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + + /* Since we have allocated space to hold a complete frame, + * the last indicator should be set. + * */ + if ((status & BD_ENET_RX_LAST) == 0) + printk(KERN_ERR "SWITCH ENET: rcv is not +last\n"); + + if (!fep->opened) + goto rx_processing_done; + + /* Check for errors. */ + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | + BD_ENET_RX_CR | BD_ENET_RX_OV)) { + dev->stats.rx_errors++; + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + /* Frame too long or too short. 
*/ + dev->stats.rx_length_errors++; + } + if (status & BD_ENET_RX_NO) /* Frame alignment */ + dev->stats.rx_frame_errors++; + if (status & BD_ENET_RX_CR) /* CRC Error */ + dev->stats.rx_crc_errors++; + if (status & BD_ENET_RX_OV) /* FIFO overrun */ + dev->stats.rx_fifo_errors++; + } + /* Report late collisions as a frame error. + * On this error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on the floor. + * */ + if (status & BD_ENET_RX_CL) { + dev->stats.rx_errors++; + dev->stats.rx_frame_errors++; + goto rx_processing_done; + } + /* Process the incoming frame */ + dev->stats.rx_packets++; + pkt_len = bdp->cbd_datlen; + dev->stats.rx_bytes += pkt_len; + data = (__u8 *)__va(bdp->cbd_bufaddr); + + /* This does 16 byte alignment, exactly what we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + * */ + skb = dev_alloc_skb(pkt_len); + + if (skb == NULL) + dev->stats.rx_dropped++; + else { + skb_put(skb, pkt_len); /* Make room */ + skb_copy_to_linear_data(skb, data, pkt_len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + } +rx_processing_done: + + /* Clear the status flags for this buffer */ + status &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty */ + status |= BD_ENET_RX_EMPTY; + bdp->cbd_sc = status; + + /* Update BD pointer to next entry */ + if (status & BD_ENET_RX_WRAP) + bdp = fep->rx_bd_base; + else + bdp++; + + /* Doing this here will keep the FEC running while we process + * incoming frames. On a heavily loaded network, we should be + * able to keep up at the expense of system resources. 
+ * */ + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE; + } + fep->cur_rx = (cbd_t *)bdp; + + spin_unlock_irq(&fep->hw_lock); +} + +static int fec_mdio_transfer(struct mii_bus *bus, int phy_id, + int reg, int regval) +{ + struct net_device *dev = bus->priv; + unsigned long flags; + struct switch_enet_private *fep; + int tries = 100; + int retval = 0; + + fep = netdev_priv(dev); + spin_lock_irqsave(&fep->mii_lock, flags); + + regval |= phy_id << 23; +#if 0 + MCF_FEC_MMFR0 = regval; +#else + writel(regval, fep->fec[0] + FEC_MII_DATA); +#endif + + /* wait for it to finish, this takes about 23 us on lite5200b */ +#if 0 + while (!(MCF_FEC_EIR0 & FEC_ENET_MII) && --tries) +#else + while (!(readl(fep->fec[0]+FEC_IEVENT) & FEC_ENET_MII) && --tries) +#endif + udelay(5); + + if (!tries) { + printk(KERN_ERR "%s timeout\n", __func__); + return -ETIMEDOUT; + } + +#if 0 + MCF_FEC_EIR0 = FEC_ENET_MII; +#else + writel(FEC_ENET_MII, fep->fec[0]+FEC_IEVENT); +#endif + +#if 0 + retval = MCF_FEC_MMFR0; +#else + retval = readl(fep->fec[0] + FEC_MII_DATA); +#endif + + spin_unlock_irqrestore(&fep->mii_lock, flags); + + return retval; +} + + +static int mvf_fec_mdio_read(struct mii_bus *bus, + int phy_id, int reg) +{ + int ret; + ret = fec_mdio_transfer(bus, phy_id, reg, + mk_mii_read(reg)); + return ret; +} + +static int mvf_fec_mdio_write(struct mii_bus *bus, + int phy_id, int reg, u16 data) +{ + return fec_mdio_transfer(bus, phy_id, reg, + mk_mii_write(reg, data)); +} + +static void switch_adjust_link1(struct net_device *dev) +{ + struct switch_enet_private *priv = netdev_priv(dev); + struct phy_device *phydev1 = priv->phydev[0]; + int new_state = 0; + + if (phydev1->link != PHY_DOWN) { + if (phydev1->duplex != priv->phy1_duplex) { + new_state = 1; + priv->phy1_duplex = phydev1->duplex; + } + + if (phydev1->speed != priv->phy1_speed) { + new_state = 1; + priv->phy1_speed = phydev1->speed; + } + + if (priv->phy1_old_link == PHY_DOWN) { + new_state = 1; + priv->phy1_old_link = 
phydev1->link; + } + } else if (priv->phy1_old_link) { + new_state = 1; + priv->phy1_old_link = PHY_DOWN; + priv->phy1_speed = 0; + priv->phy1_duplex = -1; + } + + if (new_state) { + ports_link_status.port1_link_status = phydev1->link; + if (phydev1->link == PHY_DOWN) + esw_atable_dynamicms_del_entries_for_port(priv, 1); + + /*Send the new status to user space*/ + if (user_pid != 1) + sys_tkill(user_pid, SIGUSR1); + } +} + +static void switch_adjust_link2(struct net_device *dev) +{ + struct switch_enet_private *priv = netdev_priv(dev); + struct phy_device *phydev2 = priv->phydev[1]; + int new_state = 0; + + if (phydev2->link != PHY_DOWN) { + if (phydev2->duplex != priv->phy2_duplex) { + new_state = 1; + priv->phy2_duplex = phydev2->duplex; + } + + if (phydev2->speed != priv->phy2_speed) { + new_state = 1; + priv->phy2_speed = phydev2->speed; + } + + if (priv->phy2_old_link == PHY_DOWN) { + new_state = 1; + priv->phy2_old_link = phydev2->link; + } + } else if (priv->phy2_old_link) { + new_state = 1; + priv->phy2_old_link = PHY_DOWN; + priv->phy2_speed = 0; + priv->phy2_duplex = -1; + } + + if (new_state) { + ports_link_status.port2_link_status = phydev2->link; + if (phydev2->link == PHY_DOWN) + esw_atable_dynamicms_del_entries_for_port(priv, 2); + + /*Send the new status to user space*/ + if (user_pid != 1) + sys_tkill(user_pid, SIGUSR1); + } +} + +static int mvf_switch_init_phy(struct net_device *dev) +{ + struct switch_enet_private *priv = netdev_priv(dev); + struct phy_device *phydev[SWITCH_EPORT_NUMBER] = {NULL, NULL}; + int i, startnode = 0; + + /* search for connect PHY device */ + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *const tmp_phydev = + priv->mdio_bus->phy_map[i]; + + if (!tmp_phydev) + continue; + +#ifdef CONFIG_FEC_SHARED_PHY + if (priv->index == 0) { + phydev[i] = tmp_phydev; + } + else if (priv->index == 1) { + if (startnode == 1) { + phydev[i] = tmp_phydev; + startnode = 0; + } else { + startnode++; + continue; + } + } else + 
printk(KERN_INFO "%s now we do not" + "support (%d) more than" + "2 phys shared " + "one mdio bus\n", + __func__, startnode); +#else + phydev[i] = tmp_phydev; +#endif + } + + /* now we are supposed to have a proper phydev, to attach to... */ + if ((!phydev[0]) && (!phydev[1])) { + printk(KERN_INFO "%s: Don't found any phy device at all\n", + dev->name); + return -ENODEV; + } + + priv->phy1_link = PHY_DOWN; + priv->phy1_old_link = PHY_DOWN; + priv->phy1_speed = 0; + priv->phy1_duplex = -1; + + priv->phy2_link = PHY_DOWN; + priv->phy2_old_link = PHY_DOWN; + priv->phy2_speed = 0; + priv->phy2_duplex = -1; + +#ifndef CONFIG_ARCH_MVF + phydev[0] = phy_connect(dev, phydev[0]->dev.bus_id, +#else + phydev[0] = phy_connect(dev, dev_name(&phydev[0]->dev), +#endif + &switch_adjust_link1, 0, PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev[0])) { + printk(KERN_ERR " %s phy_connect failed\n", __func__); + return PTR_ERR(phydev[0]); + } + +#ifndef CONFIG_ARCH_MVF + phydev[1] = phy_connect(dev, phydev[1]->dev.bus_id, +#else + phydev[0] = phy_connect(dev, dev_name(&phydev[0]->dev), +#endif + &switch_adjust_link2, 0, PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev[1])) { + printk(KERN_ERR " %s phy_connect failed\n", __func__); + return PTR_ERR(phydev[1]); + } + + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) { + printk(KERN_INFO "attached phy %i to driver %s\n", + phydev[i]->addr, phydev[i]->drv->name); + priv->phydev[i] = phydev[i]; + } + + return 0; +} +/* -----------------------------------------------------------------------*/ +static int +switch_enet_open(struct net_device *dev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + volatile switch_t *fecp; + int i; + + fecp = (volatile switch_t *)fep->hwp; + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. 
+ */ + switch_set_mac_address(dev); + + fep->phy1_link = 0; + fep->phy2_link = 0; + +#ifdef FEC_PHY + mvf_switch_init_phy(dev); + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) { + phy_write(fep->phydev[i], MII_BMCR, BMCR_RESET); + phy_start(fep->phydev[i]); + } +#endif + fep->phy1_old_link = 0; + fep->phy2_old_link = 0; + fep->phy1_link = 1; + fep->phy2_link = 1; + + /* no phy, go full duplex, it's most likely a hub chip */ + switch_restart(dev, 1); + + /* if the fec is the fist open, we need to do nothing*/ + /* if the fec is not the fist open, we need to restart the FEC*/ + if (fep->sequence_done == 0) + switch_restart(dev, 1); + else + fep->sequence_done = 0; + + fep->currTime = 0; + fep->learning_irqhandle_enable = 0; + + fecp->ESW_PER = 0x70007; + fecp->ESW_DBCR = MCF_ESW_DBCR_P0 | MCF_ESW_DBCR_P1 | MCF_ESW_DBCR_P2; + fecp->ESW_DMCR = MCF_ESW_DMCR_P0 | MCF_ESW_DMCR_P1 | MCF_ESW_DMCR_P2; + + netif_start_queue(dev); + fep->opened = 1; + return 0; +} + +static int +switch_enet_close(struct net_device *dev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + int i; + + /* Don't know what to do yet.*/ + fep->opened = 0; + netif_stop_queue(dev); + switch_stop(dev); +#ifdef FEC_PHY + for (i = 0; i < SWITCH_EPORT_NUMBER; i++) { + phy_disconnect(fep->phydev[i]); + phy_stop(fep->phydev[i]); + phy_write(fep->phydev[i], MII_BMCR, BMCR_PDOWN); + } +#endif + return 0; +} + +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). 
+ */ + +#define HASH_BITS 6 /* #bits in hash */ +#define CRC32_POLY 0xEDB88320 + +static void set_multicast_list(struct net_device *dev) +{ + struct switch_enet_private *fep; + volatile switch_t *ep; + unsigned int i, bit, data, crc; + struct netdev_hw_addr *ha; + + fep = netdev_priv(dev); + ep = fep->hwp; + + if (dev->flags & IFF_PROMISC) { + /*ep->fec_r_cntrl |= 0x0008;*/ + printk(KERN_INFO "%s IFF_PROMISC\n", __func__); + } else { + + /*ep->fec_r_cntrl &= ~0x0008;*/ + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + printk(KERN_INFO "%s IFF_ALLMULTI\n", __func__); + } else { + /* Clear filter and add the addresses in hash register. + */ + /*ep->fec_grp_hash_table_high = 0; + ep->fec_grp_hash_table_low = 0;*/ + + netdev_for_each_mc_addr(ha, dev) { + /* Only support group multicast for now */ + if (!(ha->addr[0] & 1)) + continue; + + /* calculate crc32 value of mac address */ + crc = 0xffffffff; + + for (i = 0; i < dev->addr_len; i++) { + data = ha->addr[i]; + for (bit = 0; bit < 8; bit++, data >>= 1) { + crc = (crc >> 1) ^ + (((crc ^ data) & 1) ? 
CRC32_POLY : 0); + } + } + + } + } + } +} + +/* Set a MAC change in hardware.*/ +static void +switch_set_mac_address(struct net_device *dev) +{ + volatile switch_t *fecp; + + fecp = ((struct switch_enet_private *)netdev_priv(dev))->hwp; +} + +static void +switch_hw_init( struct switch_enet_private *fep) +{ +#if 0 + /* GPIO config - RMII mode for both MACs */ + MCF_GPIO_PAR_FEC = (MCF_GPIO_PAR_FEC & + MCF_GPIO_PAR_FEC_FEC_MASK) | + MCF_GPIO_PAR_FEC_FEC_RMII0FUL_1FUL; +#endif + + +#if 0 + /* Initialize MAC 0/1 */ + /* RCR */ + MCF_FEC_RCR0 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD); + MCF_FEC_RCR1 = (MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | + MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD); + /* TCR */ + MCF_FEC_TCR0 = MCF_FEC_TCR_FDEN; + MCF_FEC_TCR1 = MCF_FEC_TCR_FDEN; +#else + writel((MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD), fep->fec[0] + FEC_R_CNTRL); + writel((MCF_FEC_RCR_PROM | MCF_FEC_RCR_RMII_MODE | MCF_FEC_RCR_MAX_FL(1522) | MCF_FEC_RCR_CRC_FWD), fep->fec[1] + FEC_R_CNTRL); + + writel(MCF_FEC_TCR_FDEN, fep->fec[0] + FEC_X_CNTRL); + writel(MCF_FEC_TCR_FDEN, fep->fec[1] + FEC_X_CNTRL); +#endif + + + +#if 0 + /* ECR */ +#ifdef CONFIG_ENHANCED_BD + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588; + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588; +#else + MCF_FEC_ECR0 = MCF_FEC_ECR_ETHER_EN; + MCF_FEC_ECR1 = MCF_FEC_ECR_ETHER_EN; +#endif + +#else + + +#ifdef CONFIG_ENHANCED_BD + writel(MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588, fep->fec[0] + FEC_ECNTRL); + writel(MCF_FEC_ECR_ETHER_EN | MCF_FEC_ECR_ENA_1588, fep->fec[1] + FEC_ECNTRL); +#else + writel(MCF_FEC_ECR_ETHER_EN , fep->fec[0] + FEC_ECNTRL); + writel(MCF_FEC_ECR_ETHER_EN , fep->fec[1] + FEC_ECNTRL); +#endif + +#endif + + +#if 0 + MCF_FEC_MSCR0 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; + MCF_FEC_MSCR1 = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; + + 
MCF_FEC_EIMR0 = FEC_ENET_TXF | FEC_ENET_RXF; + MCF_FEC_EIMR1 = FEC_ENET_TXF | FEC_ENET_RXF; + /*MCF_PPMHR0*/ + MCF_PPMCR0 = 0; +#else + writel( MVF_MII_SWITCH_SPEED, fep->fec[0] + FEC_MII_SPEED); + writel( MVF_MII_SWITCH_SPEED, fep->fec[1] + FEC_MII_SPEED); + writel( FEC_ENET_TXF | FEC_ENET_RXF, fep->fec[0] + FEC_IMASK); + writel( FEC_ENET_TXF | FEC_ENET_RXF, fep->fec[1] + FEC_IMASK); + +// MCF_PPMCR0 = 0; +// writel( , fep->fec[0] + ); + #pragma message "need fix!!!!!" +#endif + + +} + +#ifdef CONFIG_ARCH_MVF +static const struct net_device_ops mvf_switch_ops = { + .ndo_open = switch_enet_open, + .ndo_stop = switch_enet_close, + .ndo_start_xmit = switch_enet_start_xmit, + .ndo_tx_timeout = switch_timeout, + .ndo_do_ioctl = switch_enet_ioctl, + .ndo_set_multicast_list = set_multicast_list, +}; +#endif + + +/* Initialize the FEC Ethernet. + */ + /* + * XXX: We need to clean up on failure exits here. + */ +int __init switch_enet_init(struct net_device *dev, + int slot, struct platform_device *pdev) +{ + struct switch_enet_private *fep = netdev_priv(dev); + unsigned long mem_addr; + cbd_t *bdp; + cbd_t *cbd_base; + volatile switch_t *fecp; + int i, j; + struct mvf_switch_platform_data *plat = + pdev->dev.platform_data; + + /* Only allow us to be probed once. */ + if (slot >= SWITCH_MAX_PORTS) + return -ENXIO; + + /* Allocate memory for buffer descriptors. + */ + mem_addr = __get_free_page(GFP_DMA); + if (mem_addr == 0) { + printk(KERN_ERR "Switch: allocate descriptor memory failed?\n"); + return -ENOMEM; + } + + spin_lock_init(&fep->hw_lock); + spin_lock_init(&fep->mii_lock); + + /* Create an Ethernet device instance. 
+ */ +#ifdef CONFIG_ARCH_MVF + fecp = (volatile switch_t *)ioremap(plat->switch_hw[0], SZ_4K); +#else + fecp = (volatile switch_t *)plat->switch_hw[0]; +#endif + + fep->index = slot; + fep->hwp = fecp; +#ifdef CONFIG_ARCH_MVF + fep->hwentry = (eswAddrTable_t *)ioremap(plat->switch_hw[1], SZ_4K); +#else + fep->hwentry = (eswAddrTable_t *)plat->switch_hw[1]; +#endif + + // new add +#ifdef CONFIG_ARCH_MVF + fep->fec[0] = ioremap(plat->fec_hw[0], SZ_4K); + fep->fec[1] = ioremap(plat->fec_hw[1], SZ_4K); +#endif + + + fep->netdev = dev; +#ifdef CONFIG_FEC_SHARED_PHY + fep->phy_hwp = (volatile switch_t *) plat->switch_hw[slot & ~1]; +#else + fep->phy_hwp = fecp; +#endif + + /* + * SWITCH CONFIGURATION + */ + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST; + udelay(10); + /* enable switch*/ + fecp->ESW_MODE = MCF_ESW_MODE_STATRST; + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN; + + /* Enable transmit/receive on all ports */ + fecp->ESW_PER = 0xffffffff; + + /* Management port configuration, + * make port 0 as management port */ + fecp->ESW_BMPC = 0; + + /* clear all switch irq*/ + fecp->switch_ievent = 0xffffffff; + fecp->switch_imask = 0; + + udelay(10); + + /* Set the Ethernet address. If using multiple Enets on the 8xx, + * this needs some work to get unique addresses. + * + * This is our default MAC address unless the user changes + * it via eth_mac_addr (our dev->set_mac_addr handler). + */ + if (plat && plat->get_mac) + plat->get_mac(dev); + + cbd_base = (cbd_t *)mem_addr; + /* XXX: missing check for allocation failure */ + if (plat && plat->uncache) + plat->uncache(mem_addr); + + /* Set receive and transmit descriptor base. + */ + fep->rx_bd_base = cbd_base; + fep->tx_bd_base = cbd_base + RX_RING_SIZE; + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->cur_rx = fep->rx_bd_base; + + fep->skb_cur = fep->skb_dirty = 0; + + /* Initialize the receive buffer descriptors. */ + bdp = fep->rx_bd_base; + + for (i = 0; i < SWITCH_ENET_RX_PAGES; i++) { + + /* Allocate a page. 
+ */ + mem_addr = __get_free_page(GFP_DMA); + /* XXX: missing check for allocation failure */ + if (plat && plat->uncache) + plat->uncache(mem_addr); + + /* Initialize the BD for every fragment in the page. + */ + for (j = 0; j < SWITCH_ENET_RX_FRPPG; j++) { + bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_bufaddr = __pa(mem_addr); +#ifdef CONFIG_ENHANCED_BD + bdp->bdu = 0x00000000; + bdp->ebd_status = RX_BD_INT; +#endif + mem_addr += SWITCH_ENET_RX_FRSIZE; + bdp++; + } + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* ...and the same for transmmit. + */ + bdp = fep->tx_bd_base; + for (i = 0, j = SWITCH_ENET_TX_FRPPG; i < TX_RING_SIZE; i++) { + if (j >= SWITCH_ENET_TX_FRPPG) { + mem_addr = __get_free_page(GFP_DMA); + j = 1; + } else { + mem_addr += SWITCH_ENET_TX_FRSIZE; + j++; + } + fep->tx_bounce[i] = (unsigned char *) mem_addr; + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* Set receive and transmit descriptor base. + */ + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); + + /* Install our interrupt handlers. This varies depending on + * the architecture. + */ + if (plat && plat->request_intrs) + plat->request_intrs(dev, switch_enet_interrupt, dev); + + fecp->fec_r_buff_size = RX_BUFFER_SIZE; + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE; + dev->base_addr = (unsigned long)fecp; + + /* The FEC Ethernet specific entries in the device structure. 
*/ +#ifndef CONFIG_ARCH_MVF + dev->open = switch_enet_open; + dev->hard_start_xmit = switch_enet_start_xmit; + dev->tx_timeout = switch_timeout; + dev->watchdog_timeo = TX_TIMEOUT; + dev->stop = switch_enet_close; + dev->set_multicast_list = set_multicast_list; + dev->do_ioctl = switch_enet_ioctl; +#else + dev->watchdog_timeo = TX_TIMEOUT; + dev->netdev_ops = &mvf_switch_ops; +#endif + + /* setup MII interface */ + if (plat && plat->set_mii) + plat->set_mii(dev); + + /* Clear and enable interrupts */ + fecp->switch_ievent = 0xffffffff; + fecp->switch_imask = MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB | + MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF; + esw_clear_atable(fep); + /* Queue up command to detect the PHY and initialize the + * remainder of the interface. + */ +#ifndef CONFIG_FEC_SHARED_PHY + fep->phy_addr = 0; +#else + fep->phy_addr = fep->index; +#endif + + fep->sequence_done = 1; + return 0; +} + +/* This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ +static void +switch_restart(struct net_device *dev, int duplex) +{ + struct switch_enet_private *fep; + cbd_t *bdp; + volatile switch_t *fecp; + int i; + struct mvf_switch_platform_data *plat; + + fep = netdev_priv(dev); + fecp = fep->hwp; + plat = fep->pdev->dev.platform_data; + /* Whack a reset. We should wait for this.*/ +#if 0 + MCF_FEC_ECR0 = 1; + MCF_FEC_ECR1 = 1; +#else + writel(1, fep->fec[0] + FEC_ECNTRL); + writel(1, fep->fec[1] + FEC_ECNTRL); +#endif + + udelay(10); + + fecp->ESW_MODE = MCF_ESW_MODE_SW_RST; + udelay(10); + fecp->ESW_MODE = MCF_ESW_MODE_STATRST; + fecp->ESW_MODE = MCF_ESW_MODE_SW_EN; + + /* Enable transmit/receive on all ports */ + fecp->ESW_PER = 0xffffffff; + + /* Management port configuration, + * make port 0 as management port */ + fecp->ESW_BMPC = 0; + + /* Clear any outstanding interrupt. 
+ */ + fecp->switch_ievent = 0xffffffff; + + /* Set station address.*/ + switch_set_mac_address(dev); + + switch_hw_init(fep); + + /* Reset all multicast.*/ + + /* Set maximum receive buffer size. + */ + fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; + + if (plat && plat->localhw_setup) + plat->localhw_setup(); + /* Set receive and transmit descriptor base. + */ + fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); + fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->cur_rx = fep->rx_bd_base; + + /* Reset SKB transmit buffers. + */ + fep->skb_cur = fep->skb_dirty = 0; + for (i = 0; i <= TX_RING_MOD_MASK; i++) { + if (fep->tx_skbuff[i] != NULL) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + } + + /* Initialize the receive buffer descriptors. + */ + bdp = fep->rx_bd_base; + for (i = 0; i < RX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = BD_ENET_RX_EMPTY; +#ifdef CONFIG_ENHANCED_BD + bdp->bdu = 0x00000000; + bdp->ebd_status = RX_BD_INT; +#endif + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* ...and the same for transmmit. + */ + bdp = fep->tx_bd_base; + for (i = 0; i < TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page.*/ + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + bdp++; + } + + /* Set the last buffer to wrap.*/ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + fep->full_duplex = duplex; + + /* And last, enable the transmit and receive processing.*/ + fecp->fec_r_buff_size = RX_BUFFER_SIZE; + fecp->fec_r_des_active = MCF_ESW_RDAR_R_DES_ACTIVE; + + /* Enable interrupts we wish to service. 
+ */ + fecp->switch_ievent = 0xffffffff; + fecp->switch_imask = MCF_ESW_IMR_RXF | MCF_ESW_IMR_TXF | + MCF_ESW_IMR_RXB | MCF_ESW_IMR_TXB; +} + +static void +switch_stop(struct net_device *dev) +{ + volatile switch_t *fecp; + struct switch_enet_private *fep; + struct mvf_switch_platform_data *plat; + + fep = netdev_priv(dev); + fecp = fep->hwp; + plat = fep->pdev->dev.platform_data; + /* + ** We cannot expect a graceful transmit stop without link !!! + */ + if (fep->phy1_link) + udelay(10); + if (fep->phy2_link) + udelay(10); + + /* Whack a reset. We should wait for this. + */ + udelay(10); +} + +static int fec_mdio_register(struct net_device *dev, + int slot) +{ + int err = 0; + struct switch_enet_private *fep = netdev_priv(dev); + + fep->mdio_bus = mdiobus_alloc(); + if (!fep->mdio_bus) { + printk(KERN_ERR "ethernet switch mdiobus_alloc fail\n"); + return -ENOMEM; + } + + if (slot == 0) { + fep->mdio_bus->name = "MVF switch MII 0 Bus"; + strcpy(fep->mdio_bus->id, "0"); + } else if (slot == 1) { + fep->mdio_bus->name = "MVF switch MII 1 Bus"; + strcpy(fep->mdio_bus->id, "1"); + } else { + printk(KERN_ERR "Now mvf can not" + "support more than 2 mii bus\n"); + } + + fep->mdio_bus->read = &mvf_fec_mdio_read; + fep->mdio_bus->write = &mvf_fec_mdio_write; + fep->mdio_bus->priv = dev; + err = mdiobus_register(fep->mdio_bus); + if (err) { + mdiobus_free(fep->mdio_bus); + printk(KERN_ERR "%s: ethernet mdiobus_register fail\n", + dev->name); + return -EIO; + } + + printk(KERN_INFO "mdiobus_register %s ok\n", + fep->mdio_bus->name); + return err; +} + +static int __init eth_switch_probe(struct platform_device *pdev) +{ + struct net_device *dev; + int i, err; + struct switch_enet_private *fep; + + struct switch_platform_private *chip; + struct task_struct *task; + + printk(KERN_INFO "Ethernet Switch Version 1.0\n"); + chip = kzalloc(sizeof(struct switch_platform_private) + + sizeof(struct switch_enet_private *) * SWITCH_MAX_PORTS, + GFP_KERNEL); + if (!chip) { + err = 
-ENOMEM; + printk(KERN_ERR "%s: kzalloc fail %x\n", __func__, + (unsigned int)chip); + return err; + } + + chip->pdev = pdev; + chip->num_slots = SWITCH_MAX_PORTS; + platform_set_drvdata(pdev, chip); + + for (i = 0; (i < chip->num_slots); i++) { + dev = alloc_etherdev(sizeof(struct switch_enet_private)); + if (!dev) { + printk(KERN_ERR "%s: ethernet switch alloc_etherdev fail\n", + dev->name); + return -ENOMEM; + } + + fep = netdev_priv(dev); + fep->pdev = pdev; + printk(KERN_ERR "%s: ethernet switch port %d init\n", + __func__, i); + err = switch_enet_init(dev, i, pdev); + if (err) { + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + kfree(chip); + continue; + } + + chip->fep_host[i] = fep; + /* disable mdio */ +#ifdef FEC_PHY +#ifdef CONFIG_FEC_SHARED_PHY + if (i == 0) + err = fec_mdio_register(dev, 0); + else { + fep->mdio_bus = chip->fep_host[0]->mdio_bus; + printk(KERN_INFO "FEC%d SHARED the %s ok\n", + i, fep->mdio_bus->name); + } +#else + err = fec_mdio_register(dev, i); +#endif + if (err) { + printk(KERN_ERR "%s: ethernet switch fec_mdio_register\n", + dev->name); + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + kfree(chip); + return -ENOMEM; + } +#endif + /* setup timer for Learning Aging function */ + init_timer(&fep->timer_aging); + fep->timer_aging.function = l2switch_aging_timer; + fep->timer_aging.data = (unsigned long) fep; + fep->timer_aging.expires = jiffies + LEARNING_AGING_TIMER; + add_timer(&fep->timer_aging); + + /* register network device*/ + if (register_netdev(dev) != 0) { + /* XXX: missing cleanup here */ + free_netdev(dev); + platform_set_drvdata(pdev, NULL); + kfree(chip); + printk(KERN_ERR "%s: ethernet switch register_netdev fail\n", + dev->name); + return -EIO; + } + + task = kthread_run(switch_enet_learning, fep, + "mvf l2switch"); + if (IS_ERR(task)) { + err = PTR_ERR(task); + return err; + } + + printk(KERN_INFO "%s: ethernet switch %pM\n", + dev->name, dev->dev_addr); + } + + return 0; +} + +static int 
eth_switch_remove(struct platform_device *pdev) +{ + int i; + struct net_device *dev; + struct switch_enet_private *fep; + struct switch_platform_private *chip; + + chip = platform_get_drvdata(pdev); + if (chip) { + for (i = 0; i < chip->num_slots; i++) { + fep = chip->fep_host[i]; + dev = fep->netdev; + fep->sequence_done = 1; + unregister_netdev(dev); + free_netdev(dev); + + del_timer_sync(&fep->timer_aging); + } + + platform_set_drvdata(pdev, NULL); + kfree(chip); + + } else + printk(KERN_ERR "%s: can not get the " + "switch_platform_private %x\n", __func__, + (unsigned int)chip); + + return 0; +} + +static struct platform_driver eth_switch_driver = { + .probe = eth_switch_probe, + .remove = eth_switch_remove, + .driver = { + .name = "mvf-switch", + .owner = THIS_MODULE, + }, +}; + +static int __init mvf_switch_init(void) +{ + return platform_driver_register(ð_switch_driver);; +} + +static void __exit mvf_switch_exit(void) +{ + platform_driver_unregister(ð_switch_driver); +} + +module_init(mvf_switch_init); +module_exit(mvf_switch_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/mvf_switch.h b/drivers/net/mvf_switch.h new file mode 100644 index 000000000000..7e40414836d5 --- /dev/null +++ b/drivers/net/mvf_switch.h @@ -0,0 +1,661 @@ +/* + * mvfswitch -- L2 Switch Controller for mvf SoC + * processors. + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + */ +#ifndef SWITCH_H +#define SWITCH_H + +/* + * The Switch stores dest/src/type, data, and checksum for receive packets. + */ +#define PKT_MAXBUF_SIZE 1518 +#define PKT_MINBUF_SIZE 64 +#define PKT_MAXBLR_SIZE 1520 + +/* + * The 5441x RX control register also contains maximum frame + * size bits. 
+ */ +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) + +/* + * Some hardware gets it MAC address out of local flash memory. + * if this is non-zero then assume it is the address to get MAC from. + */ +#define FEC_FLASHMAC 0 + +/* The number of Tx and Rx buffers. These are allocated from the page + * pool. The code may assume these are power of two, so it it best + * to keep them that size. + * We don't need to allocate pages for the transmitter. We just use + * the skbuffer directly. + */ +#ifdef CONFIG_SWITCH_DMA_USE_SRAM +#define SWITCH_ENET_RX_PAGES 6 +#else +#define SWITCH_ENET_RX_PAGES 8 +#endif + +#define SWITCH_ENET_RX_FRSIZE 2048 +#define SWITCH_ENET_RX_FRPPG ( PAGE_SIZE / SWITCH_ENET_RX_FRSIZE ) +#define RX_RING_SIZE (SWITCH_ENET_RX_FRPPG * SWITCH_ENET_RX_PAGES) +#define SWITCH_ENET_TX_FRSIZE 2048 +#define SWITCH_ENET_TX_FRPPG (PAGE_SIZE / SWITCH_ENET_TX_FRSIZE) + +#ifdef CONFIG_SWITCH_DMA_USE_SRAM +#define TX_RING_SIZE 8 /* Must be power of two */ +#define TX_RING_MOD_MASK 7 /* for this to work */ +#else +#define TX_RING_SIZE 16 /* Must be power of two */ +#define TX_RING_MOD_MASK 15 /* for this to work */ +#endif + +#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) +#error "L2SWITCH: descriptor ring size constants too large" +#endif + +/*unsigned long MCF_ESW_LOOKUP_MEM;*/ +#if 0 +#define MCF_ESW_REVISION (*(volatile unsigned long *)(0xFC0DC000)) +#define MCF_ESW_PER (*(volatile unsigned long *)(0xFC0DC008)) +#define MCF_ESW_VLANV (*(volatile unsigned long *)(0xFC0DC010)) +#define MCF_ESW_DBCR (*(volatile unsigned long *)(0xFC0DC014)) +#define MCF_ESW_DMCR (*(volatile unsigned long *)(0xFC0DC018)) +#define MCF_ESW_BKLR (*(volatile unsigned long *)(0xFC0DC01C)) +#define MCF_ESW_BMPC (*(volatile unsigned long *)(0xFC0DC020)) +#define MCF_ESW_MODE (*(volatile unsigned long *)(0xFC0DC024)) + +#define MCF_ESW_ISR (*(volatile unsigned long *)(0xFC0DC400)) +#define MCF_ESW_IMR (*(volatile unsigned long *)(0xFC0DC404)) +#define MCF_ESW_TDAR (*(volatile 
unsigned long *)(0xFC0DC418)) +#define MCF_ESW_LOOKUP_MEM (*(volatile unsigned long *)(0xFC0E0000)) + +#define MCF_PPMCR0 (*(volatile unsigned short *)(0xFC04002D)) +#define MCF_PPMHR0 (*(volatile unsigned long *)(0xFC040030)) +#endif + +#if 0 +// for compile +#define MCF_FEC_EIR0 (*(volatile unsigned long *)(0xFC0D4004)) +#define MCF_FEC_EIR1 (*(volatile unsigned long *)(0xFC0D8004)) +#define MCF_FEC_EIMR0 (*(volatile unsigned long *)(0xFC0D4008)) +#define MCF_FEC_EIMR1 (*(volatile unsigned long *)(0xFC0D8008)) +#define MCF_FEC_MMFR0 (*(volatile unsigned long *)(0xFC0D4040)) +#define MCF_FEC_MMFR1 (*(volatile unsigned long *)(0xFC0D8040)) +#define MCF_FEC_MSCR0 (*(volatile unsigned long *)(0xFC0D4044)) +#define MCF_FEC_MSCR1 (*(volatile unsigned long *)(0xFC0D8044)) +#define MCF_FEC_RCR0 (*(volatile unsigned long *)(0xFC0D4084)) +#define MCF_FEC_RCR1 (*(volatile unsigned long *)(0xFC0D8084)) +#define MCF_FEC_TCR0 (*(volatile unsigned long *)(0xFC0D40C4)) +#define MCF_FEC_TCR1 (*(volatile unsigned long *)(0xFC0D80C4)) +#define MCF_FEC_ECR0 (*(volatile unsigned long *)(0xFC0D4024)) +#define MCF_FEC_ECR1 (*(volatile unsigned long *)(0xFC0D8024)) +#else +// from fec.h +// #define FEC_R_CNTRL 0x084 /* Receive control reg */ +// #define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */ +// #define FEC_IEVENT 0x004 /* Interrupt event reg */ +// #define FEC_IMASK 0x008 +// #define FEC_MII_DATA 0x040 /* MII manage frame reg */ +// #define FEC_MII_SPEED 0x044 /* MII speed control reg */ +// #define FEC_ECNTRL 0x024 /* Ethernet control reg */ + +#endif + +#define MCF_FEC_RCR_PROM (0x00000008) +#define MCF_FEC_RCR_RMII_MODE (0x00000100) +#define MCF_FEC_RCR_MAX_FL(x) (((x)&0x00003FFF)<<16) +#define MCF_FEC_RCR_CRC_FWD (0x00004000) +#define MCF_FEC_TCR_FDEN (0x00000004) +#define MCF_FEC_ECR_ETHER_EN (0x00000002) +#define MCF_FEC_ECR_ENA_1588 (0x00000010) + + +/*=============================================================*/ +#define LEARNING_AGING_TIMER (10 * HZ) + 
+/******************************************************************************/ +/* Recieve is empty */ +#define BD_SC_EMPTY ((unsigned short)0x8000) + +/* Transmit is ready */ +#define BD_SC_READY ((unsigned short)0x8000) + +/* Last buffer descriptor */ +#define BD_SC_WRAP ((unsigned short)0x2000) + +/* Interrupt on change */ +#define BD_SC_INTRPT ((unsigned short)0x1000) + +/* Continous mode */ +#define BD_SC_CM ((unsigned short)0x0200) + +/* Rec'd too many idles */ +#define BD_SC_ID ((unsigned short)0x0100) + +/* xmt preamble */ +#define BD_SC_P ((unsigned short)0x0100) + +/* Break received */ +#define BD_SC_BR ((unsigned short)0x0020) + +/* Framing error */ +#define BD_SC_FR ((unsigned short)0x0010) + +/* Parity error */ +#define BD_SC_PR ((unsigned short)0x0008) + +/* Overrun */ +#define BD_SC_OV ((unsigned short)0x0002) +#define BD_SC_CD ((unsigned short)0x0001) + + +/* + * Buffer descriptor control/status used by Ethernet receive. + */ +#define BD_ENET_RX_EMPTY ((unsigned short)0x8000) +#define BD_ENET_RX_WRAP ((unsigned short)0x2000) +#define BD_ENET_RX_INTR ((unsigned short)0x1000) +#define BD_ENET_RX_LAST ((unsigned short)0x0800) +#define BD_ENET_RX_FIRST ((unsigned short)0x0400) +#define BD_ENET_RX_MISS ((unsigned short)0x0100) +#define BD_ENET_RX_LG ((unsigned short)0x0020) +#define BD_ENET_RX_NO ((unsigned short)0x0010) +#define BD_ENET_RX_SH ((unsigned short)0x0008) +#define BD_ENET_RX_CR ((unsigned short)0x0004) +#define BD_ENET_RX_OV ((unsigned short)0x0002) +#define BD_ENET_RX_CL ((unsigned short)0x0001) +/* All status bits */ +#define BD_ENET_RX_STATS ((unsigned short)0x013f) + + +/* + *Buffer descriptor control/status used by Ethernet transmit. 
+ */ +#define BD_ENET_TX_READY ((unsigned short)0x8000) +#define BD_ENET_TX_PAD ((unsigned short)0x4000) +#define BD_ENET_TX_WRAP ((unsigned short)0x2000) +#define BD_ENET_TX_INTR ((unsigned short)0x1000) +#define BD_ENET_TX_LAST ((unsigned short)0x0800) +#define BD_ENET_TX_TC ((unsigned short)0x0400) +#define BD_ENET_TX_DEF ((unsigned short)0x0200) +#define BD_ENET_TX_HB ((unsigned short)0x0100) +#define BD_ENET_TX_LC ((unsigned short)0x0080) +#define BD_ENET_TX_RL ((unsigned short)0x0040) +#define BD_ENET_TX_RCMASK ((unsigned short)0x003c) +#define BD_ENET_TX_UN ((unsigned short)0x0002) +#define BD_ENET_TX_CSL ((unsigned short)0x0001) +/* All status bits */ +#define BD_ENET_TX_STATS ((unsigned short)0x03ff) + +/*Copy from validation code */ +#define RX_BUFFER_SIZE 1520 +#define TX_BUFFER_SIZE 1520 +#define NUM_RXBDS 20 +#define NUM_TXBDS 20 + +#define TX_BD_R 0x8000 +#define TX_BD_TO1 0x4000 +#define TX_BD_W 0x2000 +#define TX_BD_TO2 0x1000 +#define TX_BD_L 0x0800 +#define TX_BD_TC 0x0400 + +#define TX_BD_INT 0x40000000 +#define TX_BD_TS 0x20000000 +#define TX_BD_PINS 0x10000000 +#define TX_BD_IINS 0x08000000 +#define TX_BD_TXE 0x00008000 +#define TX_BD_UE 0x00002000 +#define TX_BD_EE 0x00001000 +#define TX_BD_FE 0x00000800 +#define TX_BD_LCE 0x00000400 +#define TX_BD_OE 0x00000200 +#define TX_BD_TSE 0x00000100 +#define TX_BD_BDU 0x80000000 + +#define RX_BD_E 0x8000 +#define RX_BD_R01 0x4000 +#define RX_BD_W 0x2000 +#define RX_BD_R02 0x1000 +#define RX_BD_L 0x0800 +#define RX_BD_M 0x0100 +#define RX_BD_BC 0x0080 +#define RX_BD_MC 0x0040 +#define RX_BD_LG 0x0020 +#define RX_BD_NO 0x0010 +#define RX_BD_CR 0x0004 +#define RX_BD_OV 0x0002 +#define RX_BD_TR 0x0001 + +#define RX_BD_ME 0x80000000 +#define RX_BD_PE 0x04000000 +#define RX_BD_CE 0x02000000 +#define RX_BD_UC 0x01000000 +#define RX_BD_INT 0x00800000 +#define RX_BD_ICE 0x00000020 +#define RX_BD_PCR 0x00000010 +#define RX_BD_VLAN 0x00000004 +#define RX_BD_IPV6 0x00000002 +#define RX_BD_FRAG 0x00000001 +#define 
RX_BD_BDU 0x80000000 +/****************************************************************************/ + +/* Address Table size in bytes(2048 64bit entry ) */ +#define ESW_ATABLE_MEM_SIZE (2048*8) +/* How many 64-bit elements fit in the address table */ +#define ESW_ATABLE_MEM_NUM_ENTRIES (2048) +/* Address Table Maximum number of entries in each Slot */ +#define ATABLE_ENTRY_PER_SLOT 8 +/* log2(ATABLE_ENTRY_PER_SLOT)*/ +#define ATABLE_ENTRY_PER_SLOT_bits 3 +/* entry size in byte */ +#define ATABLE_ENTRY_SIZE 8 +/* slot size in byte */ +#define ATABLE_SLOT_SIZE (ATABLE_ENTRY_PER_SLOT * ATABLE_ENTRY_SIZE) +/* width of timestamp variable (bits) within address table entry */ +#define AT_DENTRY_TIMESTAMP_WIDTH 10 +/* number of bits for port number storage */ +#define AT_DENTRY_PORT_WIDTH 4 +/* number of bits for port bitmask number storage */ +#define AT_SENTRY_PORT_WIDTH 7 +/* address table static entry port bitmask start address bit */ +#define AT_SENTRY_PORTMASK_shift 21 +/* number of bits for port priority storage */ +#define AT_SENTRY_PRIO_WIDTH 7 +/* address table static entry priority start address bit */ +#define AT_SENTRY_PRIO_shift 18 +/* address table dynamic entry port start address bit */ +#define AT_DENTRY_PORT_shift 28 +/* address table dynamic entry timestamp start address bit */ +#define AT_DENTRY_TIME_shift 18 +/* address table entry record type start address bit */ +#define AT_ENTRY_TYPE_shift 17 +/* address table entry record type bit: 1 static, 0 dynamic */ +#define AT_ENTRY_TYPE_STATIC 1 +#define AT_ENTRY_TYPE_DYNAMIC 0 +/* address table entry record valid start address bit */ +#define AT_ENTRY_VALID_shift 16 +#define AT_ENTRY_RECORD_VALID 1 + +#define AT_EXTRACT_VALID(x) \ + ((x >> AT_ENTRY_VALID_shift) & AT_ENTRY_RECORD_VALID) + +#define AT_EXTRACT_PORTMASK(x) \ + ((x >> AT_SENTRY_PORTMASK_shift) & AT_SENTRY_PORT_WIDTH) + +#define AT_EXTRACT_PRIO(x) \ + ((x >> AT_SENTRY_PRIO_shift) & AT_SENTRY_PRIO_WIDTH) + +/* return block corresponding to the 8 
bit hash value calculated */ +#define GET_BLOCK_PTR(hash) (hash << 3) +#define AT_EXTRACT_TIMESTAMP(x) \ + ((x >> AT_DENTRY_TIME_shift) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1)) +#define AT_EXTRACT_PORT(x) \ + ((x >> AT_DENTRY_PORT_shift) & ((1 << AT_DENTRY_PORT_WIDTH)-1)) +#define AT_SEXTRACT_PORT(x) \ + ((~((x >> AT_SENTRY_PORTMASK_shift) & \ + ((1 << AT_DENTRY_PORT_WIDTH)-1))) >> 1) +#define TIMEDELTA(newtime, oldtime) \ + ((newtime - oldtime) & \ + ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1)) + +#define AT_EXTRACT_IP_PROTOCOL(x) ((x >> 8) & 0xff) +#define AT_EXTRACT_TCP_UDP_PORT(x) ((x >> 16) & 0xffff) + +/* increment time value respecting modulo. */ +#define TIMEINCREMENT(time) \ + ((time) = ((time)+1) & ((1 << AT_DENTRY_TIMESTAMP_WIDTH)-1)) +/* ------------------------------------------------------------------------- */ +/* Bit definitions and macros for MCF_ESW_REVISION */ +#define MCF_ESW_REVISION_CORE_REVISION(x) (((x)&0x0000FFFF)<<0) +#define MCF_ESW_REVISION_CUSTOMER_REVISION(x) (((x)&0x0000FFFF)<<16) + +/* Bit definitions and macros for MCF_ESW_PER */ +#define MCF_ESW_PER_TE0 (0x00000001) +#define MCF_ESW_PER_TE1 (0x00000002) +#define MCF_ESW_PER_TE2 (0x00000004) +#define MCF_ESW_PER_RE0 (0x00010000) +#define MCF_ESW_PER_RE1 (0x00020000) +#define MCF_ESW_PER_RE2 (0x00040000) + +/* Bit definitions and macros for MCF_ESW_VLANV */ +#define MCF_ESW_VLANV_VV0 (0x00000001) +#define MCF_ESW_VLANV_VV1 (0x00000002) +#define MCF_ESW_VLANV_VV2 (0x00000004) +#define MCF_ESW_VLANV_DU0 (0x00010000) +#define MCF_ESW_VLANV_DU1 (0x00020000) +#define MCF_ESW_VLANV_DU2 (0x00040000) + +/* Bit definitions and macros for MCF_ESW_DBCR */ +#define MCF_ESW_DBCR_P0 (0x00000001) +#define MCF_ESW_DBCR_P1 (0x00000002) +#define MCF_ESW_DBCR_P2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_DMCR */ +#define MCF_ESW_DMCR_P0 (0x00000001) +#define MCF_ESW_DMCR_P1 (0x00000002) +#define MCF_ESW_DMCR_P2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_BKLR */ +#define 
MCF_ESW_BKLR_BE0 (0x00000001) +#define MCF_ESW_BKLR_BE1 (0x00000002) +#define MCF_ESW_BKLR_BE2 (0x00000004) +#define MCF_ESW_BKLR_LD0 (0x00010000) +#define MCF_ESW_BKLR_LD1 (0x00020000) +#define MCF_ESW_BKLR_LD2 (0x00040000) + +/* Bit definitions and macros for MCF_ESW_BMPC */ +#define MCF_ESW_BMPC_PORT(x) (((x)&0x0000000F)<<0) +#define MCF_ESW_BMPC_MSG_TX (0x00000020) +#define MCF_ESW_BMPC_EN (0x00000040) +#define MCF_ESW_BMPC_DIS (0x00000080) +#define MCF_ESW_BMPC_PRIORITY(x) (((x)&0x00000007)<<13) +#define MCF_ESW_BMPC_PORTMASK(x) (((x)&0x00000007)<<16) + +/* Bit definitions and macros for MCF_ESW_MODE */ +#define MCF_ESW_MODE_SW_RST (0x00000001) +#define MCF_ESW_MODE_SW_EN (0x00000002) +#define MCF_ESW_MODE_STOP (0x00000080) +#define MCF_ESW_MODE_CRC_TRAN (0x00000100) +#define MCF_ESW_MODE_P0CT (0x00000200) +#define MCF_ESW_MODE_STATRST (0x80000000) + +/* Bit definitions and macros for MCF_ESW_VIMSEL */ +#define MCF_ESW_VIMSEL_IM0(x) (((x)&0x00000003)<<0) +#define MCF_ESW_VIMSEL_IM1(x) (((x)&0x00000003)<<2) +#define MCF_ESW_VIMSEL_IM2(x) (((x)&0x00000003)<<4) + +/* Bit definitions and macros for MCF_ESW_VOMSEL */ +#define MCF_ESW_VOMSEL_OM0(x) (((x)&0x00000003)<<0) +#define MCF_ESW_VOMSEL_OM1(x) (((x)&0x00000003)<<2) +#define MCF_ESW_VOMSEL_OM2(x) (((x)&0x00000003)<<4) + +/* Bit definitions and macros for MCF_ESW_VIMEN */ +#define MCF_ESW_VIMEN_EN0 (0x00000001) +#define MCF_ESW_VIMEN_EN1 (0x00000002) +#define MCF_ESW_VIMEN_EN2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_VID */ +#define MCF_ESW_VID_TAG(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_MCR */ +#define MCF_ESW_MCR_PORT(x) (((x)&0x0000000F)<<0) +#define MCF_ESW_MCR_MEN (0x00000010) +#define MCF_ESW_MCR_INGMAP (0x00000020) +#define MCF_ESW_MCR_EGMAP (0x00000040) +#define MCF_ESW_MCR_INGSA (0x00000080) +#define MCF_ESW_MCR_INGDA (0x00000100) +#define MCF_ESW_MCR_EGSA (0x00000200) +#define MCF_ESW_MCR_EGDA (0x00000400) + +/* Bit definitions and macros for 
MCF_ESW_EGMAP */ +#define MCF_ESW_EGMAP_EG0 (0x00000001) +#define MCF_ESW_EGMAP_EG1 (0x00000002) +#define MCF_ESW_EGMAP_EG2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_INGMAP */ +#define MCF_ESW_INGMAP_ING0 (0x00000001) +#define MCF_ESW_INGMAP_ING1 (0x00000002) +#define MCF_ESW_INGMAP_ING2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_INGSAL */ +#define MCF_ESW_INGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_INGSAH */ +#define MCF_ESW_INGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_INGDAL */ +#define MCF_ESW_INGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_INGDAH */ +#define MCF_ESW_INGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_ENGSAL */ +#define MCF_ESW_ENGSAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_ENGSAH */ +#define MCF_ESW_ENGSAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_ENGDAL */ +#define MCF_ESW_ENGDAL_ADDLOW(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_ENGDAH */ +#define MCF_ESW_ENGDAH_ADDHIGH(x) (((x)&0x0000FFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_MCVAL */ +#define MCF_ESW_MCVAL_COUNT(x) (((x)&0x000000FF)<<0) + +/* Bit definitions and macros for MCF_ESW_MMSR */ +#define MCF_ESW_MMSR_BUSY (0x00000001) +#define MCF_ESW_MMSR_NOCELL (0x00000002) +#define MCF_ESW_MMSR_MEMFULL (0x00000004) +#define MCF_ESW_MMSR_MFLATCH (0x00000008) +#define MCF_ESW_MMSR_DQ_GRNT (0x00000040) +#define MCF_ESW_MMSR_CELLS_AVAIL(x) (((x)&0x000000FF)<<16) + +/* Bit definitions and macros for MCF_ESW_LMT */ +#define MCF_ESW_LMT_THRESH(x) (((x)&0x000000FF)<<0) + +/* Bit definitions and macros for MCF_ESW_LFC */ +#define MCF_ESW_LFC_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_PCSR */ +#define MCF_ESW_PCSR_PC0 (0x00000001) +#define MCF_ESW_PCSR_PC1 (0x00000002) +#define 
MCF_ESW_PCSR_PC2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_IOSR */ +#define MCF_ESW_IOSR_OR0 (0x00000001) +#define MCF_ESW_IOSR_OR1 (0x00000002) +#define MCF_ESW_IOSR_OR2 (0x00000004) + +/* Bit definitions and macros for MCF_ESW_QWT */ +#define MCF_ESW_QWT_Q0WT(x) (((x)&0x0000001F)<<0) +#define MCF_ESW_QWT_Q1WT(x) (((x)&0x0000001F)<<8) +#define MCF_ESW_QWT_Q2WT(x) (((x)&0x0000001F)<<16) +#define MCF_ESW_QWT_Q3WT(x) (((x)&0x0000001F)<<24) + +/* Bit definitions and macros for MCF_ESW_P0BCT */ +#define MCF_ESW_P0BCT_THRESH(x) (((x)&0x000000FF)<<0) + +/* Bit definitions and macros for MCF_ESW_P0FFEN */ +#define MCF_ESW_P0FFEN_FEN (0x00000001) +#define MCF_ESW_P0FFEN_FD(x) (((x)&0x00000003)<<2) + +/* Bit definitions and macros for MCF_ESW_PSNP */ +#define MCF_ESW_PSNP_EN (0x00000001) +#define MCF_ESW_PSNP_MODE(x) (((x)&0x00000003)<<1) +#define MCF_ESW_PSNP_CD (0x00000008) +#define MCF_ESW_PSNP_CS (0x00000010) +#define MCF_ESW_PSNP_PORT_COMPARE(x) (((x)&0x0000FFFF)<<16) + +/* Bit definitions and macros for MCF_ESW_IPSNP */ +#define MCF_ESW_IPSNP_EN (0x00000001) +#define MCF_ESW_IPSNP_MODE(x) (((x)&0x00000003)<<1) +#define MCF_ESW_IPSNP_PROTOCOL(x) (((x)&0x000000FF)<<8) + +/* Bit definitions and macros for MCF_ESW_PVRES */ +#define MCF_ESW_PVRES_PRI0(x) (((x)&0x00000007)<<0) +#define MCF_ESW_PVRES_PRI1(x) (((x)&0x00000007)<<3) +#define MCF_ESW_PVRES_PRI2(x) (((x)&0x00000007)<<6) +#define MCF_ESW_PVRES_PRI3(x) (((x)&0x00000007)<<9) +#define MCF_ESW_PVRES_PRI4(x) (((x)&0x00000007)<<12) +#define MCF_ESW_PVRES_PRI5(x) (((x)&0x00000007)<<15) +#define MCF_ESW_PVRES_PRI6(x) (((x)&0x00000007)<<18) +#define MCF_ESW_PVRES_PRI7(x) (((x)&0x00000007)<<21) + +/* Bit definitions and macros for MCF_ESW_IPRES */ +#define MCF_ESW_IPRES_ADDRESS(x) (((x)&0x000000FF)<<0) +#define MCF_ESW_IPRES_IPV4SEL (0x00000100) +#define MCF_ESW_IPRES_PRI0(x) (((x)&0x00000003)<<9) +#define MCF_ESW_IPRES_PRI1(x) (((x)&0x00000003)<<11) +#define MCF_ESW_IPRES_PRI2(x) (((x)&0x00000003)<<13) 
+#define MCF_ESW_IPRES_READ (0x80000000) + +/* Bit definitions and macros for MCF_ESW_PRES */ +#define MCF_ESW_PRES_VLAN (0x00000001) +#define MCF_ESW_PRES_IP (0x00000002) +#define MCF_ESW_PRES_MAC (0x00000004) +#define MCF_ESW_PRES_DFLT_PRI(x) (((x)&0x00000007)<<4) + +/* Bit definitions and macros for MCF_ESW_PID */ +#define MCF_ESW_PID_VLANID(x) (((x)&0x0000FFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_VRES */ +#define MCF_ESW_VRES_P0 (0x00000001) +#define MCF_ESW_VRES_P1 (0x00000002) +#define MCF_ESW_VRES_P2 (0x00000004) +#define MCF_ESW_VRES_VLANID(x) (((x)&0x00000FFF)<<3) + +/* Bit definitions and macros for MCF_ESW_DISCN */ +#define MCF_ESW_DISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_DISCB */ +#define MCF_ESW_DISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_NDISCN */ +#define MCF_ESW_NDISCN_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_NDISCB */ +#define MCF_ESW_NDISCB_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_POQC */ +#define MCF_ESW_POQC_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_PMVID */ +#define MCF_ESW_PMVID_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_PMVTAG */ +#define MCF_ESW_PMVTAG_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_PBL */ +#define MCF_ESW_PBL_COUNT(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_ISR */ +#define MCF_ESW_ISR_EBERR (0x00000001) +#define MCF_ESW_ISR_RXB (0x00000002) +#define MCF_ESW_ISR_RXF (0x00000004) +#define MCF_ESW_ISR_TXB (0x00000008) +#define MCF_ESW_ISR_TXF (0x00000010) +#define MCF_ESW_ISR_QM (0x00000020) +#define MCF_ESW_ISR_OD0 (0x00000040) +#define MCF_ESW_ISR_OD1 (0x00000080) +#define MCF_ESW_ISR_OD2 (0x00000100) +#define MCF_ESW_ISR_LRN (0x00000200) + +/* Bit definitions and macros for MCF_ESW_IMR */ +#define MCF_ESW_IMR_EBERR (0x00000001) +#define 
MCF_ESW_IMR_RXB (0x00000002) +#define MCF_ESW_IMR_RXF (0x00000004) +#define MCF_ESW_IMR_TXB (0x00000008) +#define MCF_ESW_IMR_TXF (0x00000010) +#define MCF_ESW_IMR_QM (0x00000020) +#define MCF_ESW_IMR_OD0 (0x00000040) +#define MCF_ESW_IMR_OD1 (0x00000080) +#define MCF_ESW_IMR_OD2 (0x00000100) +#define MCF_ESW_IMR_LRN (0x00000200) + +/* Bit definitions and macros for MCF_ESW_RDSR */ +#define MCF_ESW_RDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2) + +/* Bit definitions and macros for MCF_ESW_TDSR */ +#define MCF_ESW_TDSR_ADDRESS(x) (((x)&0x3FFFFFFF)<<2) + +/* Bit definitions and macros for MCF_ESW_MRBR */ +#define MCF_ESW_MRBR_SIZE(x) (((x)&0x000003FF)<<4) + +/* Bit definitions and macros for MCF_ESW_RDAR */ +#define MCF_ESW_RDAR_R_DES_ACTIVE (0x01000000) + +/* Bit definitions and macros for MCF_ESW_TDAR */ +#define MCF_ESW_TDAR_X_DES_ACTIVE (0x01000000) + +/* Bit definitions and macros for MCF_ESW_LREC0 */ +#define MCF_ESW_LREC0_MACADDR0(x) (((x)&0xFFFFFFFF)<<0) + +/* Bit definitions and macros for MCF_ESW_LREC1 */ +#define MCF_ESW_LREC1_MACADDR1(x) (((x)&0x0000FFFF)<<0) +#define MCF_ESW_LREC1_HASH(x) (((x)&0x000000FF)<<16) +#define MCF_ESW_LREC1_SWPORT(x) (((x)&0x00000003)<<24) + +/* Bit definitions and macros for MCF_ESW_LSR */ +#define MCF_ESW_LSR_DA (0x00000001) + +/* port mirroring port number match */ +#define MIRROR_EGRESS_PORT_MATCH 1 +#define MIRROR_INGRESS_PORT_MATCH 2 + +/* port mirroring mac address match */ +#define MIRROR_EGRESS_SOURCE_MATCH 1 +#define MIRROR_INGRESS_SOURCE_MATCH 2 +#define MIRROR_EGRESS_DESTINATION_MATCH 3 +#define MIRROR_INGRESS_DESTINATION_MATCH 4 + +/*-------------ioctl command ---------------------------------------*/ +#define ESW_SET_LEARNING_CONF 0x9101 +#define ESW_GET_LEARNING_CONF 0x9201 +#define ESW_SET_BLOCKING_CONF 0x9102 +#define ESW_GET_BLOCKING_CONF 0x9202 +#define ESW_SET_MULTICAST_CONF 0x9103 +#define ESW_GET_MULTICAST_CONF 0x9203 +#define ESW_SET_BROADCAST_CONF 0x9104 +#define ESW_GET_BROADCAST_CONF 0x9204 +#define 
ESW_SET_PORTENABLE_CONF 0x9105 +#define ESW_GET_PORTENABLE_CONF 0x9205 +#define ESW_SET_IP_SNOOP_CONF 0x9106 +#define ESW_GET_IP_SNOOP_CONF 0x9206 +#define ESW_SET_PORT_SNOOP_CONF 0x9107 +#define ESW_GET_PORT_SNOOP_CONF 0x9207 +#define ESW_SET_PORT_MIRROR_CONF 0x9108 +#define ESW_GET_PORT_MIRROR_CONF 0x9208 +#define ESW_SET_PIRORITY_VLAN 0x9109 +#define ESW_GET_PIRORITY_VLAN 0x9209 +#define ESW_SET_PIRORITY_IP 0x910A +#define ESW_GET_PIRORITY_IP 0x920A +#define ESW_SET_PIRORITY_MAC 0x910B +#define ESW_GET_PIRORITY_MAC 0x920B +#define ESW_SET_PIRORITY_DEFAULT 0x910C +#define ESW_GET_PIRORITY_DEFAULT 0x920C +#define ESW_SET_P0_FORCED_FORWARD 0x910D +#define ESW_GET_P0_FORCED_FORWARD 0x920D +#define ESW_SET_SWITCH_MODE 0x910E +#define ESW_GET_SWITCH_MODE 0x920E +#define ESW_SET_BRIDGE_CONFIG 0x910F +#define ESW_GET_BRIDGE_CONFIG 0x920F +#define ESW_SET_VLAN_OUTPUT_PROCESS 0x9110 +#define ESW_GET_VLAN_OUTPUT_PROCESS 0x9210 +#define ESW_SET_VLAN_INPUT_PROCESS 0x9111 +#define ESW_GET_VLAN_INPUT_PROCESS 0x9211 +#define ESW_SET_VLAN_DOMAIN_VERIFICATION 0x9112 +#define ESW_GET_VLAN_DOMAIN_VERIFICATION 0x9212 +#define ESW_SET_VLAN_RESOLUTION_TABLE 0x9113 +#define ESW_GET_VLAN_RESOLUTION_TABLE 0x9213 +#define ESW_GET_ENTRY_PORT_NUMBER 0x9214 +#define ESW_GET_LOOKUP_TABLE 0x9215 +#define ESW_GET_PORT_STATUS 0x9216 +#define ESW_SET_VLAN_ID 0x9114 +#define ESW_SET_VLAN_ID_CLEARED 0x9115 +#define ESW_SET_PORT_IN_VLAN_ID 0x9116 +#define ESW_SET_PORT_ENTRY_EMPTY 0x9117 +#define ESW_SET_OTHER_PORT_ENTRY_EMPTY 0x9118 +#define ESW_GET_PORT_ALL_STATUS 0x9217 +#define ESW_SET_PORT_MIRROR_CONF_PORT_MATCH 0x9119 +#define ESW_SET_PORT_MIRROR_CONF_ADDR_MATCH 0x911A + +#define ESW_GET_STATISTICS_STATUS 0x9221 +#define ESW_SET_OUTPUT_QUEUE_MEMORY 0x9125 +#define ESW_GET_OUTPUT_QUEUE_STATUS 0x9225 +#define ESW_UPDATE_STATIC_MACTABLE 0x9226 +#define ESW_CLEAR_ALL_MACTABLE 0x9227 +#define ESW_GET_USER_PID 0x9228 + +#endif /* SWITCH_H */ diff --git a/drivers/net/phy/micrel.c 
b/drivers/net/phy/micrel.c index 80747d2d1118..af44c0df14e8 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -143,6 +143,21 @@ static struct phy_driver ks8041_driver = { .driver = { .owner = THIS_MODULE,}, }; +static struct phy_driver ks8041z_driver = { + .phy_id = PHY_ID_KSZ8041, + .phy_id_mask = 0x00ffffff, + .name = "Micrel KSZ8041", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause + | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, +}; + static struct phy_driver ks8051_driver = { .phy_id = PHY_ID_KS8051, .phy_id_mask = 0x00ffffff, @@ -187,10 +202,15 @@ static struct phy_driver ksz9021_driver = { .driver = { .owner = THIS_MODULE, }, }; + static int __init ksphy_init(void) { int ret; + ret = phy_driver_register(&ks8041z_driver); + if (ret) + goto err1; + ret = phy_driver_register(&ks8001_driver); if (ret) goto err1; @@ -225,6 +245,7 @@ err1: static void __exit ksphy_exit(void) { + phy_driver_unregister(&ks8041z_driver); phy_driver_unregister(&ks8001_driver); phy_driver_unregister(&ks8737_driver); phy_driver_unregister(&ksz9021_driver); @@ -240,6 +261,7 @@ MODULE_AUTHOR("David J. 
Choi"); MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { + { PHY_ID_KSZ8041, 0x00ffffff }, { PHY_ID_KSZ9021, 0x00ffffff }, { PHY_ID_KS8001, 0x00ffffff }, { PHY_ID_KS8737, 0x00ffffff }, diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index ba66c938ae0a..8ebf770c3af9 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/cdev.h> +#include <linux/netdevice.h> /* * Some conventions on how we handle peripherals on Freescale chips @@ -398,6 +399,21 @@ struct p1003_ts_platform_data { int (*hw_status) (void); }; +struct mvf_switch_platform_data { + int hash_table; + unsigned int *fec_hw; + unsigned int *switch_hw; + void (*request_intrs)(struct net_device *dev, irqreturn_t (*)(int, void *), void *irq_privatedata); + void (*set_mii)(struct net_device *dev); + void (*get_mac)(struct net_device *dev); + void (*enable_phy_intr)(void); + void (*disable_phy_intr)(void); + void (*phy_ack_intr)(void); + void (*localhw_setup)(void); + void (*uncache)(unsigned long addr); + void (*platform_flush_cache)(void); +}; + /* Returns non-zero if the current suspend operation would * lead to a deep sleep (i.e. power removed from the core, * instead of just the clock). diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index dd8da342a991..b073c57534b3 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -6,6 +6,7 @@ #define PHY_ID_KSZ9021 0x00221611 #define PHY_ID_KS8737 0x00221720 #define PHY_ID_KS8041 0x00221510 +#define PHY_ID_KSZ8041 0x00221512 #define PHY_ID_KS8051 0x00221550 /* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */ #define PHY_ID_KS8001 0x0022161A |