From 511e3036e3c108281edbabf60cf43963c336abf3 Mon Sep 17 00:00:00 2001 From: Richard Leitner Date: Mon, 27 Nov 2017 08:16:45 +0100 Subject: net: phy: harmonize phy_id{,_mask} data type Previously phy_id was u32 and phy_id_mask was unsigned int. As the phy_id_mask defines the important bits of the phy_id (and is therefore the same size) these two variables should be the same data type. Signed-off-by: Richard Leitner Reviewed-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index dc82a07cb4fd..e00fd9ce3bce 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -509,7 +509,7 @@ struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; - unsigned int phy_id_mask; + u32 phy_id_mask; u32 features; u32 flags; const void *driver_data; -- cgit v1.2.3 From 00fde79532d66d18cd0b21fdbd515f4b14078ccf Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 30 Nov 2017 23:46:19 +0100 Subject: net: phy: core: use genphy version of callbacks read_status and config_aneg per default read_status and config_aneg are the only mandatory callbacks and most of the time the generic implementation is used by drivers. So make the core fall back to the generic version if a driver doesn't implement the respective callback. Also currently the core doesn't seem to verify that drivers implement the mandatory calls. If a driver doesn't do so we'd just get a NULL pointer dereference. With this patch this potential issue doesn't exist any longer. Signed-off-by: Heiner Kallweit Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index e00fd9ce3bce..4962af37722a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -497,13 +497,13 @@ struct phy_device { * flags: A bitfield defining certain other features this PHY * supports (like interrupts) * - * The drivers must implement config_aneg and read_status. All - * other functions are optional. Note that none of these - * functions should be called from interrupt time. The goal is - * for the bus read/write functions to be able to block when the - * bus transaction is happening, and be freed up by an interrupt - * (The MPC85xx has this ability, though it is not currently - * supported in the driver). + * All functions are optional. If config_aneg or read_status + * are not implemented, the phy core uses the genphy versions. + * Note that none of these functions should be called from + * interrupt time. The goal is for the bus read/write functions + * to be able to block when the bus transaction is happening, + * and be freed up by an interrupt (The MPC85xx has this ability, + * though it is not currently supported in the driver). */ struct phy_driver { struct mdio_driver_common mdiodrv; @@ -841,14 +841,6 @@ int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); -static inline int phy_read_status(struct phy_device *phydev) -{ - if (!phydev->drv) - return -EIO; - - return phydev->drv->read_status(phydev); -} - #define phydev_err(_phydev, format, args...)
\ dev_err(&_phydev->mdio.dev, format, ##args) @@ -890,6 +882,17 @@ int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); +static inline int phy_read_status(struct phy_device *phydev) +{ + if (!phydev->drv) + return -EIO; + + if (phydev->drv->read_status) + return phydev->drv->read_status(phydev); + else + return genphy_read_status(phydev); +} + void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver, struct module *owner); -- cgit v1.2.3 From 118b4aa25d90d0930611b71dd28a749c67309ccb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 1 Dec 2017 15:08:55 -0800 Subject: net: xdp: avoid output parameters when querying XDP prog The output parameters will get unwieldy if we want to add more information about the program. Simply pass the entire struct netdev_bpf in. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/netdevice.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef789e1d679e..667bdd3ad33e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3330,7 +3330,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id); +void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, + struct netdev_bpf *xdp); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); -- cgit v1.2.3 From 92f0292b35a09bb5f12a4184ac86668599bc233b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 1 Dec 2017 15:08:56 -0800 Subject: net: xdp: report flags program was installed with on query Some drivers enforce that flags on program replacement and removal must match the flags passed on install. This leaves the possibility open to enable simultaneous loading of XDP programs both to HW and DRV. Allow such drivers to report the flags back to the stack. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 667bdd3ad33e..cc4ce7456e38 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -820,6 +820,8 @@ struct netdev_bpf { struct { u8 prog_attached; u32 prog_id; + /* flags with which program was installed */ + u32 prog_flags; }; /* BPF_OFFLOAD_VERIFIER_PREP */ struct { -- cgit v1.2.3 From 0487426fedf7cf800115bac7ea391de1e2e0fa5f Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 1 Dec 2017 11:01:49 -0800 Subject: vmbus: make hv_get_ringbuffer_availbytes local The last use of hv_get_ringbuffer_availbytes in drivers is now gone. Only used by the debug info routine so make it static. Also, add READ_ONCE() to avoid any possible issues with potentially volatile index values. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/hyperv.h | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f3e97c5f94c9..5f8bd0cebddf 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -127,28 +127,6 @@ struct hv_ring_buffer_info { u32 priv_read_index; }; -/* - * - * hv_get_ringbuffer_availbytes() - * - * Get number of bytes available to read and to write to - * for the specified ring buffer - */ -static inline void -hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, - u32 *read, u32 *write) -{ - u32 read_loc, write_loc, dsize; - - /* Capture the read/write indices before they changed */ - read_loc = rbi->ring_buffer->read_index; - write_loc = rbi->ring_buffer->write_index; - dsize = rbi->ring_datasize; - - *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : - read_loc - write_loc; - *read = dsize - *write; -} static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) { -- cgit v1.2.3 From a6d1642dab0c49829cda61508cbdc97172815ff7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Thu, 30 Nov 2017 23:55:15 +0100 Subject: net: phy: core: remove now uneeded disabling of interrupts After commits c974bdbc3e "net: phy: Use threaded IRQ, to allow IRQ from sleeping devices" and 664fcf123a30 "net: phy: Threaded interrupts allow some simplification" all relevant code pieces run in process context anyway and I don't think we need the disabling of interrupts any longer. Interestingly enough, latter commit already removed the comment explaining why interrupts need to be temporarily disabled. On my system phy interrupt mode works fine with this patch. However I may miss something, especially in the context of shared phy interrupts, therefore I'd appreciate if more people could test this. Signed-off-by: Heiner Kallweit Acked-by: Ard Biesheuvel Signed-off-by: David S. Miller --- include/linux/phy.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 4962af37722a..50030da01664 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -468,7 +468,6 @@ struct phy_device { /* Interrupt and Polling infrastructure */ struct work_struct phy_queue; struct delayed_work state_queue; - atomic_t irq_disable; struct mutex lock; -- cgit v1.2.3 From f19397a5c65665d66e3866b42056f1f58b7a366b Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Fri, 1 Dec 2017 10:15:04 -0800 Subject: bpf: Add access to snd_cwnd and others in sock_ops Adds read access to snd_cwnd and srtt_us fields of tcp_sock. Since these fields are only valid if the socket associated with the sock_ops program call is a full socket, the field is_fullsock is also added to the bpf_sock_ops struct. If the socket is not a full socket, reading these fields returns 0. Note that in most cases it will not be necessary to check is_fullsock to know if there is a full socket. The context of the call, as specified by the 'op' field, can sometimes determine whether there is a full socket. The struct bpf_sock_ops has the following fields added: __u32 is_fullsock; /* Some TCP fields are only valid if * there is a full socket. If not, the * fields read as zero. */ __u32 snd_cwnd; __u32 srtt_us; /* Averaged RTT << 3 in usecs */ There is a new macro, SOCK_OPS_GET_TCP32(NAME), to make it easier to add read access to more 32 bit tcp_sock fields. 
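Example (not part of the patch): a minimal sock_ops program reading the new fields. This is a sketch only; it assumes the usual libbpf SEC()/bpf_printk() helpers, and the program name and attach point are hypothetical.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int log_rtt(struct bpf_sock_ops *skops)
{
	/* snd_cwnd and srtt_us read as zero unless this is a full socket */
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB &&
	    skops->is_fullsock)
		bpf_printk("cwnd=%u srtt=%uus\n", skops->snd_cwnd,
			   skops->srtt_us >> 3);	/* srtt_us is RTT << 3 */
	return 1;
}

char _license[] SEC("license") = "GPL";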
Signed-off-by: Lawrence Brakmo Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 80b5b482cb46..0062302e1285 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -985,6 +985,7 @@ struct bpf_sock_ops_kern { u32 reply; u32 replylong[4]; }; + u32 is_fullsock; }; #endif /* __LINUX_FILTER_H__ */ -- cgit v1.2.3 From 365c1e64ad4831f3d1806ff3d7771839ec5eeb6b Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:24:16 +0000 Subject: phy: add phy_interface_mode_is_8023z() helper Add and use phy_interface_mode_is_8023z() helper to identify the interface modes that use 802.3z negotiation. Use it in phylink's phylink_mac_an_restart(). Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 50030da01664..7570cb838410 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -761,6 +761,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) mode <= PHY_INTERFACE_MODE_RGMII_TXID; }; +/** + * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z + * negotiation + * @mode: one of &enum phy_interface_t + * + * Returns true if the phy interface mode uses the 16-bit negotiation + * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding) + */ +static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) +{ + return mode == PHY_INTERFACE_MODE_1000BASEX || + mode == PHY_INTERFACE_MODE_2500BASEX; +} + /** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) -- cgit v1.2.3 From 86a362c49f3c0ba72f2e9217c704e10959c54bec Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:24:26 +0000 Subject: phylink: get rid of separate Cisco SGMII and 802.3z modes Since the handling of SGMII and 802.3z is now the same, combine the MLO_AN_xxx constants into a single MLO_AN_INBAND, and use the PHY interface mode to distinguish between Cisco SGMII and 802.3z. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/phylink.h | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index af67edd4ae38..cab22ad3bd6f 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -20,13 +20,12 @@ enum { MLO_AN_PHY = 0, /* Conventional PHY */ MLO_AN_FIXED, /* Fixed-link mode */ - MLO_AN_SGMII, /* Cisco SGMII protocol */ - MLO_AN_8023Z, /* 1000base-X protocol */ + MLO_AN_INBAND, /* In-band protocol */ }; static inline bool phylink_autoneg_inband(unsigned int mode) { - return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z; + return mode == MLO_AN_INBAND; } struct phylink_link_state { @@ -69,7 +68,7 @@ struct phylink_mac_ops { /** * mac_config: configure the MAC for the selected mode and state * @ndev: net_device structure for the MAC - * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII + * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_INBAND * @state: state structure * * The action performed depends on the currently selected mode: @@ -77,14 +76,10 @@ struct phylink_mac_ops { * %MLO_AN_FIXED, %MLO_AN_PHY: * set the specified speed, duplex, pause mode, and phy interface * mode in the provided @state. - * %MLO_AN_8023Z: - * place the link in 1000base-X mode, advertising the parameters - * given in advertising in @state. - * %MLO_AN_SGMII: - * place the link in Cisco SGMII mode - there is no advertisment - * to make as the PHY communicates the speed and duplex to the - * MAC over the in-band control word. Configuration of the pause - * mode is as per MLO_AN_PHY since this is not included. + * %MLO_AN_INBAND: + * place the link in an inband negotiation mode (such as + * 1000base-X or Cisco SGMII mode depending on the phy interface + * mode), advertising the parameters given in advertising in @state. */ void (*mac_config)(struct net_device *ndev, unsigned int mode, const struct phylink_link_state *state); -- cgit v1.2.3 From 939eae25d9a509bb9a91c9b4a3988a5cc61ddae4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:24:37 +0000 Subject: phylink: remove phylink_init_eee() phylink_init_eee() serves no purpose, remove it. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phylink.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cab22ad3bd6f..4d0f42da9078 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -123,7 +123,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *, int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); int phylink_ethtool_get_module_eeprom(struct phylink *, struct ethtool_eeprom *, u8 *); -int phylink_init_eee(struct phylink *, bool); int phylink_get_eee_err(struct phylink *); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); -- cgit v1.2.3 From 8796c8923d9c4297723ae51c832ea166964970c1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:24:47 +0000 Subject: phylink: add documentation for kernel APIs Add kernel-doc documentation for phylink kernel APIs, and link it into the networking kapi documentation under "Network device support". Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/phylink.h | 183 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 135 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 4d0f42da9078..30e9d0070377 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -28,10 +28,23 @@ static inline bool phylink_autoneg_inband(unsigned int mode) return mode == MLO_AN_INBAND; } +/** + * struct phylink_link_state - link state structure + * @advertising: ethtool bitmask containing advertised link modes + * @lp_advertising: ethtool bitmask containing link partner advertised link + * modes + * @interface: link &typedef phy_interface_t mode + * @speed: link speed, one of the SPEED_* constants. + * @duplex: link duplex mode, one of DUPLEX_* constants. + * @pause: link pause state, described by MLO_PAUSE_* constants. + * @link: true if the link is up. + * @an_enabled: true if autonegotiation is enabled/desired. + * @an_complete: true if autonegotiation has completed. + */ struct phylink_link_state { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); - phy_interface_t interface; /* PHY_INTERFACE_xxx */ + phy_interface_t interface; int speed; int duplex; int pause; @@ -40,61 +53,135 @@ struct phylink_link_state { unsigned int an_complete:1; }; +/** + * struct phylink_mac_ops - MAC operations structure. + * @validate: Validate and update the link configuration. + * @mac_link_state: Read the current link state from the hardware. + * @mac_config: configure the MAC for the selected mode and state. + * @mac_an_restart: restart 802.3z BaseX autonegotiation. + * @mac_link_down: take the link down. + * @mac_link_up: allow the link to come up. + * + * The individual methods are described more fully below. + */ struct phylink_mac_ops { - /** - * validate: validate and update the link configuration - * @ndev: net_device structure associated with MAC - * @config: configuration to validate - * - * Update the %config->supported and %config->advertised masks - * clearing bits that can not be supported. - * - * Note: the PHY may be able to transform from one connection - * technology to another, so, eg, don't clear 1000BaseX just - * because the MAC is unable to support it. This is more about - * clearing unsupported speeds and duplex settings. - * - * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX - * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode - * based on %config->advertised and/or %config->speed. - */ void (*validate)(struct net_device *ndev, unsigned long *supported, struct phylink_link_state *state); - - /* Read the current link state from the hardware */ - int (*mac_link_state)(struct net_device *, struct phylink_link_state *); - - /* Configure the MAC */ - /** - * mac_config: configure the MAC for the selected mode and state - * @ndev: net_device structure for the MAC - * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_INBAND - * @state: state structure - * - * The action performed depends on the currently selected mode: - * - * %MLO_AN_FIXED, %MLO_AN_PHY: - * set the specified speed, duplex, pause mode, and phy interface - * mode in the provided @state. - * %MLO_AN_INBAND: - * place the link in an inband negotiation mode (such as - * 1000base-X or Cisco SGMII mode depending on the phy interface - * mode), advertising the parameters given in advertising in @state. 
- */ + int (*mac_link_state)(struct net_device *ndev, + struct phylink_link_state *state); void (*mac_config)(struct net_device *ndev, unsigned int mode, const struct phylink_link_state *state); - - /** - * mac_an_restart: restart 802.3z BaseX autonegotiation - * @ndev: net_device structure for the MAC - */ void (*mac_an_restart)(struct net_device *ndev); - - void (*mac_link_down)(struct net_device *, unsigned int mode); - void (*mac_link_up)(struct net_device *, unsigned int mode, - struct phy_device *); + void (*mac_link_down)(struct net_device *ndev, unsigned int mode); + void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + struct phy_device *phy); }; +#if 0 /* For kernel-doc purposes only. */ +/** + * validate - Validate and update the link configuration + * @ndev: a pointer to a &struct net_device for the MAC. + * @supported: ethtool bitmask for supported link modes. + * @state: a pointer to a &struct phylink_link_state. + * + * Clear bits in the @supported and @state->advertising masks that + * are not supportable by the MAC. + * + * Note that the PHY may be able to transform from one connection + * technology to another, so, eg, don't clear 1000BaseX just + * because the MAC is unable to BaseX mode. This is more about + * clearing unsupported speeds and duplex settings. + * + * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX + * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode + * based on @state->advertising and/or @state->speed and update + * @state->interface accordingly. + */ +void validate(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state); + +/** + * mac_link_state() - Read the current link state from the hardware + * @ndev: a pointer to a &struct net_device for the MAC. + * @state: a pointer to a &struct phylink_link_state. + * + * Read the current link state from the MAC, reporting the current + * speed in @state->speed, duplex mode in @state->duplex, pause mode + * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, + * negotiation completion state in @state->an_complete, and link + * up state in @state->link. + */ +int mac_link_state(struct net_device *ndev, + struct phylink_link_state *state); + +/** + * mac_config() - configure the MAC for the selected mode and state + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @state: a pointer to a &struct phylink_link_state. + * + * The action performed depends on the currently selected mode: + * + * %MLO_AN_FIXED, %MLO_AN_PHY: + * Configure the specified @state->speed, @state->duplex and + * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode. + * + * %MLO_AN_INBAND: + * place the link in an inband negotiation mode (such as 802.3z + * 1000base-X or Cisco SGMII mode depending on the @state->interface + * mode). In both cases, link state management (whether the link + * is up or not) is performed by the MAC, and reported via the + * mac_link_state() callback. Changes in link state must be made + * by calling phylink_mac_change(). + * + * If in 802.3z mode, the link speed is fixed, dependent on the + * @state->interface. Duplex is negotiated, and pause is advertised + * according to @state->an_enabled, @state->pause and + * @state->advertising flags. Beware of MACs which only support full + * duplex at gigabit and higher speeds. 
+ * + * If in Cisco SGMII mode, the link speed and duplex mode are passed + * in the serial bitstream 16-bit configuration word, and the MAC + * should be configured to read these bits and acknowledge the + * configuration word. Nothing is advertised by the MAC. The MAC is + * responsible for reading the configuration word and configuring + * itself accordingly. + */ +void mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state); + +/** + * mac_an_restart() - restart 802.3z BaseX autonegotiation + * @ndev: a pointer to a &struct net_device for the MAC. + */ +void mac_an_restart(struct net_device *ndev); + +/** + * mac_link_down() - take the link down + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), force the link down and disable any + * Energy Efficient Ethernet MAC configuration. + */ +void mac_link_down(struct net_device *ndev, unsigned int mode); + +/** + * mac_link_up() - allow the link to come up + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * @phy: any attached phy + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), allow the link to come up. If @phy + * is non-%NULL, configure Energy Efficient Ethernet by calling + * phy_init_eee() and perform appropriate MAC configuration for EEE. + */ +void mac_link_up(struct net_device *ndev, unsigned int mode, + struct phy_device *phy); +#endif + struct phylink *phylink_create(struct net_device *, struct device_node *, phy_interface_t iface, const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *); -- cgit v1.2.3 From 0a6fcd3fc14d3913966e9385d646bb05b5b47fae Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:24:53 +0000 Subject: sfp: add documentation for kernel APIs Add kernel-doc documentation for sfp kernel APIs, and link it into the networking kapi documentation under "Network device support". Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/sfp.h | 50 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 4a906f560817..b6089fe72378 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -3,7 +3,7 @@ #include -struct __packed sfp_eeprom_base { +struct sfp_eeprom_base { u8 phys_id; u8 phys_ext_id; u8 connector; @@ -166,12 +166,12 @@ struct __packed sfp_eeprom_base { union { __be16 optical_wavelength; u8 cable_spec; - }; + } __packed; u8 reserved62; u8 cc_base; -}; +} __packed; -struct __packed sfp_eeprom_ext { +struct sfp_eeprom_ext { __be16 options; u8 br_max; u8 br_min; @@ -181,12 +181,21 @@ struct __packed sfp_eeprom_ext { u8 enhopts; u8 sff8472_compliance; u8 cc_ext; -}; - -struct __packed sfp_eeprom_id { +} __packed; + +/** + * struct sfp_eeprom_id - raw SFP module identification information + * @base: base SFP module identification structure + * @ext: extended SFP module identification structure + * + * See the SFF-8472 specification and related documents for the definition + * of these structure members. 
This can be obtained from + * ftp://ftp.seagate.com/sff + */ +struct sfp_eeprom_id { struct sfp_eeprom_base base; struct sfp_eeprom_ext ext; -}; +} __packed; /* SFP EEPROM registers */ enum { @@ -353,13 +362,26 @@ struct ethtool_modinfo; struct net_device; struct sfp_bus; +/** + * struct sfp_upstream_ops - upstream operations structure + * @module_insert: called after a module has been detected to determine + * whether the module is supported for the upstream device. + * @module_remove: called after the module has been removed. + * @link_down: called when the link is non-operational for whatever + * reason. + * @link_up: called when the link is operational. + * @connect_phy: called when an I2C accessible PHY has been detected + * on the module. + * @disconnect_phy: called when a module with an I2C accessible PHY has + * been removed. + */ struct sfp_upstream_ops { - int (*module_insert)(void *, const struct sfp_eeprom_id *id); - void (*module_remove)(void *); - void (*link_down)(void *); - void (*link_up)(void *); - int (*connect_phy)(void *, struct phy_device *); - void (*disconnect_phy)(void *); + int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); + void (*module_remove)(void *priv); + void (*link_down)(void *priv); + void (*link_up)(void *priv); + int (*connect_phy)(void *priv, struct phy_device *); + void (*disconnect_phy)(void *priv); }; #if IS_ENABLED(CONFIG_SFP) -- cgit v1.2.3 From c19bb00070dd15b386fe22e7bd072e60779df050 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:25:03 +0000 Subject: sfp: convert to fwnode Convert sfp-bus to use fwnode rather than device_node internally, so we can support more than just device tree firmware. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/sfp.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sfp.h b/include/linux/sfp.h index b6089fe72378..47ea32d3e816 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -356,7 +356,7 @@ enum { SFP_PAGE = 0x7f, }; -struct device_node; +struct fwnode_handle; struct ethtool_eeprom; struct ethtool_modinfo; struct net_device; @@ -397,7 +397,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data); void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); -struct sfp_bus *sfp_register_upstream(struct device_node *np, +struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, struct net_device *ndev, void *upstream, const struct sfp_upstream_ops *ops); void sfp_unregister_upstream(struct sfp_bus *bus); @@ -441,7 +441,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus) { } -static inline struct sfp_bus *sfp_register_upstream(struct device_node *np, +static inline struct sfp_bus *sfp_register_upstream( + struct fwnode_handle *fwnode, struct net_device *ndev, void *upstream, const struct sfp_upstream_ops *ops) { -- cgit v1.2.3 From 8fa7b9b6af252cae46dc3466341873434316d34a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 1 Dec 2017 10:25:09 +0000 Subject: phylink: convert to fwnode Convert phylink to fwnode, switching phylink_create() from taking a device_node to taking a fwnode_handle. This will allow other firmware systems to take advantage of sfp/phylink support. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/phylink.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 30e9d0070377..4f0f452ff38d 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -7,6 +7,7 @@ struct device_node; struct ethtool_cmd; +struct fwnode_handle; struct net_device; enum { @@ -182,7 +183,7 @@ void mac_link_up(struct net_device *ndev, unsigned int mode, struct phy_device *phy); #endif -struct phylink *phylink_create(struct net_device *, struct device_node *, +struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *); -- cgit v1.2.3 From 62b32379fd124fea521484ba7e220d8a449f0b59 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 4 Dec 2017 11:31:48 +0100 Subject: flow_dissector: dissect tunnel info outside __skb_flow_dissect() Move dissection of tunnel info to outside of the main flow dissection function, __skb_flow_dissect(). The sole user of this feature, the flower classifier, is updated to call tunnel info dissection directly, using skb_flow_dissect_tunnel_info(). This results in a slightly less complex implementation of __skb_flow_dissect(), in particular removing logic from that call path which is not used by the majority of users. The expense of this is borne by the flower classifier which now has to make an extra call for tunnel info dissection. This patch should not result in any behavioural change. Signed-off-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a38c80e9f91e..b8e0da6c27d6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, data, proto, nhoff, hlen, flags); } +void +skb_flow_dissect_tunnel_info(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) -- cgit v1.2.3 From bafbdd527d569c8200521f2f7579f65a044271be Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 4 Dec 2017 13:35:05 +0100 Subject: phylib: Add device reset GPIO support The PHY devices sometimes do have their reset signal (maybe even power supply?) tied to some GPIO and sometimes it also does happen that a boot loader does not leave it deasserted. So far this issue has been attacked from (as I believe) a wrong angle: by teaching the MAC driver to manipulate the GPIO in question; that solution, when applied to the device trees, led to adding the PHY reset GPIO properties to the MAC device node, with one exception: Cadence MACB driver which could handle the "reset-gpios" prop in a PHY device subnode. I believe that the correct approach is to teach the 'phylib' to get the MDIO device reset GPIO from the device tree node corresponding to this device -- which this patch is doing... Note that I had to modify the AT803x PHY driver as it would stop working otherwise -- it made use of the reset GPIO for its own purposes... 
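Example (not part of the patch): how a driver might use the phy_device_reset() helper added below to pulse the reset line that phylib now takes from the PHY's device tree node. Sketch only; the function name and the delay are hypothetical, and it assumes a non-zero value asserts the reset line.

#include <linux/delay.h>
#include <linux/phy.h>

static void example_phy_hard_reset(struct phy_device *phydev)
{
	phy_device_reset(phydev, 1);	/* assert the reset GPIO from DT */
	usleep_range(1000, 2000);	/* placeholder for the PHY's pulse width */
	phy_device_reset(phydev, 0);	/* release reset */
}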
Signed-off-by: Sergei Shtylyov Acked-by: Rob Herring [geert: Propagate actual errors from fwnode_get_named_gpiod()] [geert: Avoid destroying initial setup] [geert: Consolidate GPIO descriptor acquiring code] Signed-off-by: Geert Uytterhoeven Tested-by: Richard Leitner Signed-off-by: David S. Miller --- include/linux/mdio.h | 3 +++ include/linux/phy.h | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index ca08ab16ecdc..92d4e55ffe67 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -12,6 +12,7 @@ #include #include +struct gpio_desc; struct mii_bus; /* Multiple levels of nesting are possible. However typically this is @@ -39,6 +40,7 @@ struct mdio_device { /* Bus address of the MDIO device (0-31) */ int addr; int flags; + struct gpio_desc *reset; }; #define to_mdio_device(d) container_of(d, struct mdio_device, dev) @@ -71,6 +73,7 @@ void mdio_device_free(struct mdio_device *mdiodev); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); int mdio_device_register(struct mdio_device *mdiodev); void mdio_device_remove(struct mdio_device *mdiodev); +void mdio_device_reset(struct mdio_device *mdiodev, int value); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); int mdio_device_bus_match(struct device *dev, struct device_driver *drv); diff --git a/include/linux/phy.h b/include/linux/phy.h index 7570cb838410..d3037e2ffbc4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -854,6 +854,11 @@ int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); +static inline void phy_device_reset(struct phy_device *phydev, int value) +{ + mdio_device_reset(&phydev->mdio, value); +} + #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) -- cgit v1.2.3 From 4a86a4cf68b4996a656d8c666617e8d839f8f839 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 7 Dec 2017 09:57:59 -0800 Subject: net: skb_array: expose peek API This adds a peek routine to skb_array.h for use with qdisc. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/skb_array.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 8621ffdeecbf..c7addf37d119 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -72,6 +72,11 @@ static inline bool __skb_array_empty(struct skb_array *a) return !__ptr_ring_peek(&a->ring); } +static inline struct sk_buff *__skb_array_peek(struct skb_array *a) +{ + return __ptr_ring_peek(&a->ring); +} + static inline bool skb_array_empty(struct skb_array *a) { return ptr_ring_empty(&a->ring); -- cgit v1.2.3 From 2e8d243e887c8802b373338ddff684aa0578be3b Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Thu, 7 Dec 2017 19:56:04 +0100 Subject: net: dsa: lan9303: Protect ALR operations with mutex ALR table operations are a sequence of related register operations which should be protected from concurrent access. The alr_cache should also be protected. Add alr_mutex doing that. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/linux/dsa/lan9303.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index f48a85c377de..b6514c29563f 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -26,6 +26,7 @@ struct lan9303 { bool phy_addr_sel_strap; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + struct mutex alr_mutex; /* protect ALR access */ const struct lan9303_phy_ops *ops; bool is_bridged; /* true if port 1 and 2 are bridged */ -- cgit v1.2.3 From 5e54b3c1202765ae62de24a160f1eaf6b0ebf9d4 Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Fri, 8 Dec 2017 06:03:26 -0800 Subject: macvlan: fix memory hole in macvlan_dev Move 'macaddr_count' from after 'netpoll' to after 'nest_level' to pack and reduce a memory hole. Fixes: 88ca59d1aaf28c25 (macvlan: remove unused fields in struct macvlan_dev) Signed-off-by: Girish Moodalbail Signed-off-by: David S. Miller --- include/linux/if_macvlan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index bedf54b6f943..4cb7aeeafce0 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -30,10 +30,10 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; int nest_level; + unsigned int macaddr_count; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif - unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, -- cgit v1.2.3 From 97a6ec4ac021f7fbec05c15a3aa0c4aaf0461af5 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 4 Dec 2017 10:31:41 -0800 Subject: rhashtable: Change rhashtable_walk_start to return void Most callers of rhashtable_walk_start don't care about a resize event which is indicated by a return value of -EAGAIN. So calls to rhashtable_walk_start are wrapped with code to ignore -EAGAIN. Something like this is common: ret = rhashtable_walk_start(rhiter); if (ret && ret != -EAGAIN) goto out; Since zero and -EAGAIN are the only possible return values from the function this check is pointless. The condition never evaluates to true. This patch changes rhashtable_walk_start to return void. This simplifies code for the callers that ignore -EAGAIN. For the few cases where the caller cares about the resize event, particularly where the table can be walked in multiple parts for netlink or seq file dump, the function rhashtable_walk_start_check has been added that returns -EAGAIN on a resize event. Signed-off-by: Tom Herbert Acked-by: Herbert Xu Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 361c08e35dbc..13ccc483738d 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -378,7 +378,13 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); -int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); +int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); + +static inline void rhashtable_walk_start(struct rhashtable_iter *iter) +{ + (void)rhashtable_walk_start_check(iter); +} + void *rhashtable_walk_next(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); -- cgit v1.2.3 From 2db54b475ae918d274bfc276416c384ba95e9f94 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 4 Dec 2017 10:31:42 -0800 Subject: rhashtable: Add rhashtable_walk_peek This function is like rhashtable_walk_next except that it only returns the current element in the iter and does not advance the iter. This patch also creates __rhashtable_walk_find_next. It finds the next element in the table when the entry cached in iter is NULL or at the end of a slot. __rhashtable_walk_find_next is called from rhashtable_walk_next and rhashtable_walk_peek. end_of_table is an added field to the iter structure. This indicates that the end of table was reached (walker.tbl being NULL is not a sufficient condition for end of table). Signed-off-by: Tom Herbert Acked-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/rhashtable.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 13ccc483738d..542b1b265ac4 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -207,6 +207,7 @@ struct rhashtable_iter { struct rhashtable_walker walker; unsigned int slot; unsigned int skip; + bool end_of_table; }; static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) @@ -386,6 +387,7 @@ static inline void rhashtable_walk_start(struct rhashtable_iter *iter) } void *rhashtable_walk_next(struct rhashtable_iter *iter); +void *rhashtable_walk_peek(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_free_and_destroy(struct rhashtable *ht, -- cgit v1.2.3 From 2b86093135d014624f668658e1636f22a3027fc2 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 4 Dec 2017 10:31:43 -0800 Subject: rhashtable: abstract out function to get hash Split out most of rht_key_hashfn which calculates the hash into its own function. This way the hash function can be called separately to get the hash value. Acked-by: Thomas Graf Signed-off-by: Tom Herbert Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 542b1b265ac4..c9df2527e0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -240,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); } -static inline unsigned int rht_key_hashfn( - struct rhashtable *ht, const struct bucket_table *tbl, - const void *key, const struct rhashtable_params params) +static inline unsigned int rht_key_get_hash(struct rhashtable *ht, + const void *key, const struct rhashtable_params params, + unsigned int hash_rnd) { unsigned int hash; /* params must be equal to ht->p if it isn't constant. */ if (!__builtin_constant_p(params.key_len)) - hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); + hash = ht->p.hashfn(key, ht->key_len, hash_rnd); else if (params.key_len) { unsigned int key_len = params.key_len; if (params.hashfn) - hash = params.hashfn(key, key_len, tbl->hash_rnd); + hash = params.hashfn(key, key_len, hash_rnd); else if (key_len & (sizeof(u32) - 1)) - hash = jhash(key, key_len, tbl->hash_rnd); + hash = jhash(key, key_len, hash_rnd); else - hash = jhash2(key, key_len / sizeof(u32), - tbl->hash_rnd); + hash = jhash2(key, key_len / sizeof(u32), hash_rnd); } else { unsigned int key_len = ht->p.key_len; if (params.hashfn) - hash = params.hashfn(key, key_len, tbl->hash_rnd); + hash = params.hashfn(key, key_len, hash_rnd); else - hash = jhash(key, key_len, tbl->hash_rnd); + hash = jhash(key, key_len, hash_rnd); } + return hash; +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); + return rht_bucket_index(tbl, hash); } -- cgit v1.2.3 From 92f36cca5773cbaa78c46ccf49503964a52da294 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 4 Dec 2017 10:31:44 -0800 Subject: spinlock: Add library function to allocate spinlock buckets array Add two new library functions: alloc_bucket_spinlocks and free_bucket_spinlocks. These are used to allocate and free an array of spinlocks that are useful as locks for hash buckets. The interface specifies the maximum number of spinlocks in the array as well as a CPU multiplier to derive the number of spinlocks to allocate. The number allocated is rounded up to a power of two to make the array amenable to hash lookup. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/spinlock.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..10fd28b118ee 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -414,4 +414,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, + size_t max_size, unsigned int cpu_mult, + gfp_t gfp); + +void free_bucket_spinlocks(spinlock_t *locks); + #endif /* __LINUX_SPINLOCK_H */ -- cgit v1.2.3 From 96b120b3c1397c90b64d1f4b2300fb7ce4aa8a68 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 8 Dec 2017 21:03:59 +0800 Subject: sctp: add asoc intl_enable negotiation during 4 shakehands asoc intl_enable will be set when local sp strm_interleave is set and there's I-DATA chunk in init and init_ack extensions, as said in section 2.2.1 of RFC8260. asoc intl_enable indicates all data will be sent as I-DATA chunks. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/linux/sctp.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sctp.h b/include/linux/sctp.h index da803dfc7a39..6d2bd64b6ffe 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -102,6 +102,9 @@ enum sctp_cid { /* AUTH Extension Section 4.1 */ SCTP_CID_AUTH = 0x0F, + /* sctp ndata 5.1. I-DATA */ + SCTP_CID_I_DATA = 0x40, + /* PR-SCTP Sec 3.2 */ SCTP_CID_FWD_TSN = 0xC0, -- cgit v1.2.3 From ad05a7a05ede28ba6dd935d9e932264a22518b1f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 8 Dec 2017 21:04:00 +0800 Subject: sctp: add basic structures and make chunk function for idata sctp_idatahdr and sctp_idata_chunk are used to define and parse I-DATA chunk format, and sctp_make_idata is a function to build the chunk. The I-DATA Chunk Format is defined in section 2.1 of RFC8260. Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/linux/sctp.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 6d2bd64b6ffe..38e2cf66195f 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -243,6 +243,23 @@ struct sctp_data_chunk { struct sctp_datahdr data_hdr; }; +struct sctp_idatahdr { + __be32 tsn; + __be16 stream; + __be16 reserved; + __be32 mid; + union { + __u32 ppid; + __be32 fsn; + }; + __u8 payload[0]; +}; + +struct sctp_idata_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_idatahdr data_hdr; +}; + /* DATA Chuck Specific Flags */ enum { SCTP_DATA_MIDDLE_FRAG = 0x00, -- cgit v1.2.3 From 607065bad9931e72207b0cac365d7d4abc06bd99 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 10 Dec 2017 17:55:03 -0800 Subject: tcp: avoid integer overflows in tcp_rcv_space_adjust() When using large tcp_rmem[2] values (I did tests with 500 MB), I noticed overflows while computing rcvwin. Lets fix this before the following patch. Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Acked-by: Wei Wang Acked-by: Neal Cardwell Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ca4a6361389b..4f93f0953c41 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -344,7 +344,7 @@ struct tcp_sock { /* Receiver queue space */ struct { - int space; + u32 space; u32 seq; u64 time; } rcvq_space; -- cgit v1.2.3 From f371b304f12e31fe30207c41ca7754564e0ea4dc Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 11 Dec 2017 11:39:02 -0800 Subject: bpf/tracing: allow user space to query prog array on the same tp Commit e87c6bc3852b ("bpf: permit multiple bpf attachments for a single perf event") added support to attach multiple bpf programs to a single perf event. Although this provides flexibility, users may want to know what other bpf programs attached to the same tp interface. Besides getting visibility for the underlying bpf system, such information may also help consolidate multiple bpf programs, understand potential performance issues due to a large array, and debug (e.g., one bpf program which overwrites return code may impact subsequent program results). Commit 2541517c32be ("tracing, perf: Implement BPF programs attached to kprobes") utilized the existing perf ioctl interface and added the command PERF_EVENT_IOC_SET_BPF to attach a bpf program to a tracepoint. This patch adds a new ioctl command, given a perf event fd, to query the bpf program array attached to the same perf tracepoint event. The new uapi ioctl command: PERF_EVENT_IOC_QUERY_BPF The new uapi/linux/perf_event.h structure: struct perf_event_query_bpf { __u32 ids_len; __u32 prog_cnt; __u32 ids[0]; }; User space provides buffer "ids" for kernel to copy to. When returning from the kernel, the number of available programs in the array is set in "prog_cnt". The usage: struct perf_event_query_bpf *query = malloc(sizeof(*query) + sizeof(u32) * ids_len); query.ids_len = ids_len; err = ioctl(pmu_efd, PERF_EVENT_IOC_QUERY_BPF, query); if (err == 0) { /* query.prog_cnt is the number of available progs, * number of progs in ids: (ids_len == 0) ? 
0 : query.prog_cnt */ } else if (errno == ENOSPC) { /* query.ids_len number of progs copied, * query.prog_cnt is the number of available progs */ } else { /* other errors */ } Signed-off-by: Yonghong Song Acked-by: Peter Zijlstra (Intel) Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e55e4255a210..f812ac508e9f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -254,6 +254,7 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); +int bpf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); @@ -285,6 +286,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, + __u32 __user *prog_ids, u32 request_cnt, + __u32 __user *prog_cnt); int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, -- cgit v1.2.3 From 92ace9991da08827e809c2d120108a96a281e7fc Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Dec 2017 11:36:46 -0500 Subject: add infrastructure for tagging functions as error injectable Using BPF we can override kprob'ed functions and return arbitrary values. Obviously this can be a bit unsafe, so make this feature opt-in for functions. Simply tag a function with KPROBE_ERROR_INJECT_SYMBOL in order to give BPF access to that function for error injection purposes. 
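Example (not part of the patch): tagging a function for error injection with the BPF_ALLOW_ERROR_INJECTION() macro that the bpf.h hunk below introduces. Sketch only; the function is hypothetical, and the annotation only has an effect when CONFIG_BPF_KPROBE_OVERRIDE is enabled.

#include <linux/bpf.h>

static noinline int example_prepare_buffer(void *priv)
{
	/* real work here; a BPF program may force an early error return */
	return 0;
}
BPF_ALLOW_ERROR_INJECTION(example_prepare_buffer);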
Signed-off-by: Josef Bacik Acked-by: Ingo Molnar Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 11 +++++++++++ include/linux/kprobes.h | 1 + include/linux/module.h | 5 +++++ 3 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f812ac508e9f..93e15b9d80c7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -580,4 +580,15 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_BPF_KPROBE_OVERRIDE +#define BPF_ALLOW_ERROR_INJECTION(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_error_inject_list"))) \ + _eil_addr_##fname = (unsigned long)fname; +#else +#define BPF_ALLOW_ERROR_INJECTION(fname) +#endif +#endif + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 9440a2fc8893..963fd364f3d6 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -271,6 +271,7 @@ extern bool arch_kprobe_on_func_entry(unsigned long offset); extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); +extern bool within_kprobe_error_injection_list(unsigned long addr); struct kprobe_insn_cache { struct mutex mutex; diff --git a/include/linux/module.h b/include/linux/module.h index c69b49abe877..548fa09fa806 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -475,6 +475,11 @@ struct module { ctor_fn_t *ctors; unsigned int num_ctors; #endif + +#ifdef CONFIG_BPF_KPROBE_OVERRIDE + unsigned int num_kprobe_ei_funcs; + unsigned long *kprobe_ei_funcs; +#endif } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} -- cgit v1.2.3 From 9802d86585db91655c7d1929a4f6bbe0952ea88e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Dec 2017 11:36:48 -0500 Subject: bpf: add a bpf_override_function helper Error injection is sloppy and very ad-hoc. BPF could fill this niche perfectly with its kprobe functionality. We could make sure errors are only triggered in specific call chains that we care about with very specific situations. Accomplish this with the bpf_override_function helper. This will modify the probed caller's return value to the specified value and set the PC to an override function that simply returns, bypassing the originally probed function. This gives us a nice clean way to implement systematic error injection for all of our code paths. Acked-by: Alexei Starovoitov Acked-by: Ingo Molnar Signed-off-by: Josef Bacik Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 3 ++- include/linux/trace_events.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 0062302e1285..5feb441d3dd9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -458,7 +458,8 @@ struct bpf_prog { locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1; /* Do we need dst entry? */ + dst_needed:1, /* Do we need dst entry? */ + kprobe_override:1; /* Do we override a kprobe?
*/ enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index af44e7c2d577..5fea451f6e28 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -528,6 +528,7 @@ do { \ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); +DECLARE_PER_CPU(int, bpf_kprobe_override); extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); -- cgit v1.2.3 From 3a30ae6ef3cba29c83ca791bde0d06f182d5678d Mon Sep 17 00:00:00 2001 From: Richard Leitner Date: Mon, 11 Dec 2017 13:16:57 +0100 Subject: phylib: Add device reset delay support Some PHYs need a minimum time after the reset gpio was asserted and/or deasserted. To ensure we meet these timing requirements add two new optional devicetree parameters for the phy: reset-delay-us and reset-post-delay-us. Signed-off-by: Richard Leitner Reviewed-by: Geert Uytterhoeven Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/mdio.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 92d4e55ffe67..e37c21d8eb19 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -41,6 +41,8 @@ struct mdio_device { int addr; int flags; struct gpio_desc *reset; + unsigned int reset_delay; + unsigned int reset_post_delay; }; #define to_mdio_device(d) container_of(d, struct mdio_device, dev) -- cgit v1.2.3 From a96684914adb4adb9f81faf6917e0673b92288d8 Mon Sep 17 00:00:00 2001 From: Richard Leitner Date: Mon, 11 Dec 2017 13:16:58 +0100 Subject: phylib: add reset after clk enable support Some PHYs need the refclk to be a continuous clock. Therefore they don't allow turning it off and on again during operation. Nonetheless such a clock switching is performed by some ETH drivers (namely FEC [1]) for power saving reasons. An example for an affected PHY is the SMSC/Microchip LAN8720 in "REF_CLK In Mode". In order to provide a uniform method to overcome this problem this patch adds a new phy_driver flag (PHY_RST_AFTER_CLK_EN) and corresponding function phy_reset_after_clk_enable() to the phylib. These should be used to trigger reset of the PHY after the refclk is switched on again. [1] commit e8fcfcd5684a ("net: fec: optimize the clock management to save power") Signed-off-by: Richard Leitner Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/linux/phy.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index d3037e2ffbc4..c4b4715caa21 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -59,6 +59,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_IS_INTERNAL 0x00000002 +#define PHY_RST_AFTER_CLK_EN 0x00000004 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -853,6 +854,7 @@ int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); +int phy_reset_after_clk_enable(struct phy_device *phydev); static inline void phy_device_reset(struct phy_device *phydev, int value) { -- cgit v1.2.3 From fc0f9f4d2f26b12fd2eda239bb8f18ceaf192c91 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 12 Dec 2017 07:40:56 +0100 Subject: PCI: Add pcim_set_mwi(), a device-managed pci_set_mwi() Add pcim_set_mwi(), a device-managed version of pci_set_mwi(). First user is the Realtek r8169 driver. Signed-off-by: Heiner Kallweit Acked-by: Bjorn Helgaas Signed-off-by: David S. Miller --- include/linux/pci.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pci.h b/include/linux/pci.h index 0403894147a3..483b780655bb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1072,6 +1072,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pci_set_cacheline_size(struct pci_dev *dev); #define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); +int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); -- cgit v1.2.3 From 0a62964c9020208c08c8b0323db9ea0c5980876f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 12 Dec 2017 16:00:25 -0800 Subject: net: phy: phylink: Allow specifying PHY device flags In order to let subsystems like DSA fully utilize PHYLINK, we need to be able to communicate phy_device::flags from of_phy_{connect,attach} even when using PHYLINK APIs. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phylink.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 4f0f452ff38d..36e342b450c7 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -188,7 +188,7 @@ struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); -int phylink_of_phy_connect(struct phylink *, struct device_node *); +int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); -- cgit v1.2.3 From 1ac63e392e57ffbcd2b567f2c69474f3e9cb889d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 12 Dec 2017 16:00:28 -0800 Subject: net: phy: phylink: Allow setting a custom link state callback phylink_get_fixed_state() currently consults an optional "link_gpio" GPIO descriptor, expand this mechanism to allow specifying a custom callback. This is necessary to support out of band link notifcation (e.g: from an interrupt within a MMIO register). Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/linux/phylink.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 36e342b450c7..bd137c273d38 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -190,6 +190,9 @@ void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); void phylink_disconnect_phy(struct phylink *); +int phylink_fixed_state_cb(struct phylink *, + void (*cb)(struct net_device *dev, + struct phylink_link_state *)); void phylink_mac_change(struct phylink *, bool up); -- cgit v1.2.3 From f4e2298e63d24bb7f5cf0f56f72867973cb7e652 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 13 Dec 2017 10:35:37 -0800 Subject: bpf/tracing: fix kernel/events/core.c compilation error Commit f371b304f12e ("bpf/tracing: allow user space to query prog array on the same tp") introduced a perf ioctl command to query prog array attached to the same perf tracepoint. The commit introduced a compilation error under certain config conditions, e.g., (1). CONFIG_BPF_SYSCALL is not defined, or (2). CONFIG_TRACING is defined but neither CONFIG_UPROBE_EVENTS nor CONFIG_KPROBE_EVENTS is defined. Error message: kernel/events/core.o: In function `perf_ioctl': core.c:(.text+0x98c4): undefined reference to `bpf_event_query_prog_array' This patch fixed this error by guarding the real definition under CONFIG_BPF_EVENTS and provided static inline dummy function if CONFIG_BPF_EVENTS was not defined. It renamed the function from bpf_event_query_prog_array to perf_event_query_prog_array and moved the definition from linux/bpf.h to linux/trace_events.h so the definition is in proximity to other prog_array related functions. 
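For illustration, the reason the static inline fallback cures the link error: callers can now reference the function unconditionally, and kernels built without CONFIG_BPF_EVENTS simply get -EOPNOTSUPP at runtime instead of an undefined symbol at link time. A minimal sketch of such a caller (modelled loosely on the perf ioctl path; the function below is illustrative, not the exact kernel code):

	static long query_bpf_progs(struct perf_event *event, unsigned long arg)
	{
		/* Resolves to the real implementation when CONFIG_BPF_EVENTS=y,
		 * or to the static inline stub returning -EOPNOTSUPP otherwise;
		 * the reference links either way.
		 */
		return perf_event_query_prog_array(event, (void __user *)arg);
	}
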
Fixes: f371b304f12e ("bpf/tracing: allow user space to query prog array on the same tp") Reported-by: Stephen Rothwell Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 - include/linux/trace_events.h | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 93e15b9d80c7..54dc7cae2949 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -254,7 +254,6 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); -int bpf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 5fea451f6e28..8a1442c4e513 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); void perf_event_detach_bpf_prog(struct perf_event *event); +int perf_event_query_prog_array(struct perf_event *event, void __user *info); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } +static inline int +perf_event_query_prog_array(struct perf_event *event, void __user *info) +{ + return -EOPNOTSUPP; +} #endif enum { -- cgit v1.2.3 From 259c8618b0099bfa613997b43857752167cddc20 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 14 Dec 2017 10:27:47 +0000 Subject: sfp: add sff module support Add support for SFF modules, which are soldered down SFP modules. These have a different phys_id value, and also have the present and rate select signals omitted compared with their socketed counter-parts. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/sfp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 47ea32d3e816..0c5c5f6ae1ec 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -231,6 +231,7 @@ enum { SFP_SFF8472_COMPLIANCE = 0x5e, SFP_CC_EXT = 0x5f, + SFP_PHYS_ID_SFF = 0x02, SFP_PHYS_ID_SFP = 0x03, SFP_PHYS_EXT_ID_SFP = 0x04, SFP_CONNECTOR_UNSPEC = 0x00, -- cgit v1.2.3 From 2d07a49aded490a0a4a2748e64030a0f59b6b8be Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 15 Dec 2017 00:41:25 +0800 Subject: sctp: add basic structures and make chunk function for ifwdtsn sctp_ifwdtsn_skip, sctp_ifwdtsn_hdr and sctp_ifwdtsn_chunk are used to define and parse I-FWD TSN chunk format, and sctp_make_ifwdtsn is a function to build the chunk. The I-FORWARD-TSN Chunk Format is defined in section 2.3.1 of RFC8260. Signed-off-by: Xin Long Acked-by: Marcelo R. Leitner Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 38e2cf66195f..b36c76635f18 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -110,6 +110,7 @@ enum sctp_cid { /* Use hex, as defined in ADDIP sec. 3.1 */ SCTP_CID_ASCONF = 0xC1, + SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, }; /* enum */ @@ -616,6 +617,22 @@ struct sctp_fwdtsn_chunk { struct sctp_fwdtsn_hdr fwdtsn_hdr; }; +struct sctp_ifwdtsn_skip { + __be16 stream; + __u8 reserved; + __u8 flags; + __be32 mid; +}; + +struct sctp_ifwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_ifwdtsn_skip skip[0]; +}; + +struct sctp_ifwdtsn_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_ifwdtsn_hdr fwdtsn_hdr; +}; /* ADDIP * Section 3.1.1 Address Configuration Change Chunk (ASCONF) -- cgit v1.2.3 From 28dc4c8f4557d82e9be020e85e2362239270e704 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 14 Dec 2017 17:48:16 -0800 Subject: net: phy: broadcom: Add entry for 5395 switch PHYs Add an entry for the builtin PHYs present in the Broadcom BCM5395 switch. This allows us to retrieve the PHY statistics among other things. Signed-off-by: Florian Fainelli Tested-by: Chris Healy Signed-off-by: David S. Miller --- include/linux/brcmphy.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 8ff86b4c1b8a..d3339dd48b1a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -14,6 +14,7 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5395 0x0143bcf0 #define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 -- cgit v1.2.3 From cc8b0b92a1699bc32f7fec71daa2bfc90de43a4d Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:05 -0800 Subject: bpf: introduce function calls (function boundaries) Allow arbitrary function calls from bpf function to another bpf function. Since the beginning of bpf all bpf programs were represented as a single function and program authors were forced to use always_inline for all functions in their C code. That was causing llvm to unnecessary inflate the code size and forcing developers to move code to header files with little code reuse. With a bit of additional complexity teach verifier to recognize arbitrary function calls from one bpf function to another as long as all of functions are presented to the verifier as a single bpf program. New program layout: r6 = r1 // some code .. r1 = .. // arg1 r2 = .. // arg2 call pc+1 // function call pc-relative exit .. = r1 // access arg1 .. = r2 // access arg2 .. call pc+20 // second level of function call ... It allows for better optimized code and finally allows to introduce the core bpf libraries that can be reused in different projects, since programs are no longer limited by single elf file. With function calls bpf can be compiled into multiple .o files. This patch is the first step. It detects programs that contain multiple functions and checks that calls between them are valid. It splits the sequence of bpf instructions (one program) into a set of bpf functions that call each other. Calls to only known functions are allowed. In the future the verifier may allow calls to unresolved functions and will do dynamic linking. 
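For illustration, the boundary detection amounts to one linear scan over the instructions looking for pc-relative calls and recording their targets. A simplified sketch (it assumes the BPF_PSEUDO_CALL marking in insn->src_reg that the uapi side of this series introduces, and omits the sorting and de-duplication of targets the verifier also performs):

	static int find_subprog_starts(struct bpf_verifier_env *env,
				       const struct bpf_insn *insn, int len)
	{
		int i;

		for (i = 0; i < len; i++) {
			if (insn[i].code != (BPF_JMP | BPF_CALL) ||
			    insn[i].src_reg != BPF_PSEUDO_CALL)
				continue;
			if (env->subprog_cnt >= BPF_MAX_SUBPROGS)
				return -E2BIG;
			/* target of "call pc+imm" is the next insn plus imm */
			env->subprog_starts[env->subprog_cnt++] =
				i + insn[i].imm + 1;
		}
		return 0;
	}
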
This logic supports statically linked bpf functions only. Such function boundary detection could have been done as part of control flow graph building in check_cfg(), but it's cleaner to separate function boundary detection vs control flow checks within a subprogram (function) into logically indepedent steps. Follow up patches may split check_cfg() further, but not check_subprogs(). Only allow bpf-to-bpf calls for root only and for non-hw-offloaded programs. These restrictions can be relaxed in the future. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c561b986bab0..91a583bb3fa7 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -141,6 +141,8 @@ struct bpf_ext_analyzer_ops { int insn_idx, int prev_insn_idx); }; +#define BPF_MAX_SUBPROGS 256 + /* single container for all structs * one verifier_env per bpf_check() call */ @@ -159,8 +161,9 @@ struct bpf_verifier_env { bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ - struct bpf_verifer_log log; + u32 subprog_starts[BPF_MAX_SUBPROGS]; + u32 subprog_cnt; }; static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) -- cgit v1.2.3 From f4d7e40a5b7157e1329c3c5b10f60d8289fc2941 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:06 -0800 Subject: bpf: introduce function calls (verification) Allow arbitrary function calls from bpf function to another bpf function. To recognize such set of bpf functions the verifier does: 1. runs control flow analysis to detect function boundaries 2. proceeds with verification of all functions starting from main(root) function It recognizes that the stack of the caller can be accessed by the callee (if the caller passed a pointer to its stack to the callee) and the callee can store map_value and other pointers into the stack of the caller. 3. keeps track of the stack_depth of each function to make sure that total stack depth is still less than 512 bytes 4. disallows pointers to the callee stack to be stored into the caller stack, since they will be invalid as soon as the callee returns 5. to reuse all of the existing state_pruning logic each function call is considered to be independent call from the verifier point of view. The verifier pretends to inline all function calls it sees are being called. It stores the callsite instruction index as part of the state to make sure that two calls to the same callee from two different places in the caller will be different from state pruning point of view 6. more safety checks are added to liveness analysis Implementation details: . struct bpf_verifier_state is now consists of all stack frames that led to this function . struct bpf_func_state represent one stack frame. It consists of registers in the given frame and its stack . propagate_liveness() logic had a premature optimization where mark_reg_read() and mark_stack_slot_read() were manually inlined with loop iterating over parents for each register or stack slot. Undo this optimization to reuse more complex mark_*_read() logic . 
skip_callee() logic is not necessary from safety point of view, but without it mark_*_read() markings become too conservative, since after returning from the funciton call a read of r6-r9 will incorrectly propagate the read marks into callee causing inefficient pruning later . mark_*_read() logic is now aware of control flow which makes it more complex. In the future the plan is to rewrite liveness to be hierarchical. So that liveness can be done within basic block only and control flow will be responsible for propagation of liveness information along cfg and between calls. . tail_calls and ld_abs insns are not allowed in the programs with bpf-to-bpf calls . returning stack pointers to the caller or storing them into stack frame of the caller is not allowed Testing: . no difference in cilium processed_insn numbers . large number of tests follows in next patches Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 91a583bb3fa7..1f23408024ee 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -76,6 +76,14 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* Inside the callee two registers can be both PTR_TO_STACK like + * R1=fp-8 and R2=fp-8, but one of them points to this function stack + * while another to the caller's stack. To differentiate them 'frameno' + * is used which is an index in bpf_verifier_state->frame[] array + * pointing to bpf_func_state. + * This field must be second to last, for states_equal() reasons. + */ + u32 frameno; /* This field must be last, for states_equal() reasons. */ enum bpf_reg_liveness live; }; @@ -96,13 +104,34 @@ struct bpf_stack_state { /* state of the program: * type of all registers and stack info */ -struct bpf_verifier_state { +struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; struct bpf_verifier_state *parent; + /* index of call instruction that called into this func */ + int callsite; + /* stack frame number of this function state from pov of + * enclosing bpf_verifier_state. + * 0 = main function, 1 = first callee. + */ + u32 frameno; + /* subprog number == index within subprog_stack_depth + * zero == main subprog + */ + u32 subprogno; + + /* should be second to last. 
See copy_func_state() */ int allocated_stack; struct bpf_stack_state *stack; }; +#define MAX_CALL_FRAMES 8 +struct bpf_verifier_state { + /* call stack tracking */ + struct bpf_func_state *frame[MAX_CALL_FRAMES]; + struct bpf_verifier_state *parent; + u32 curframe; +}; + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -163,12 +192,15 @@ struct bpf_verifier_env { struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ struct bpf_verifer_log log; u32 subprog_starts[BPF_MAX_SUBPROGS]; + u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; }; static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) { - return env->cur_state->regs; + struct bpf_verifier_state *cur = env->cur_state; + + return cur->frame[cur->curframe]->regs; } #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) -- cgit v1.2.3 From cc2b14d51053eb055c06f45e1a5cdbfcf2b79e94 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:08 -0800 Subject: bpf: teach verifier to recognize zero initialized stack programs with function calls are often passing various pointers via stack. When all calls are inlined llvm flattens stack accesses and optimizes away extra branches. When functions are not inlined it becomes the job of the verifier to recognize zero initialized stack to avoid exploring paths that program will not take. The following program would fail otherwise: ptr = &buffer_on_stack; *ptr = 0; ... func_call(.., ptr, ...) { if (..) *ptr = bpf_map_lookup(); } ... if (*ptr != 0) { // Access (*ptr)->field is valid. // Without stack_zero tracking such (*ptr)->field access // will be rejected } since stack slots are no longer uniform invalid | spill | misc add liveness marking to all slots, but do it in 8 byte chunks. So if nothing was read or written in [fp-16, fp-9] range it will be marked as LIVE_NONE. If any byte in that range was read, it will be marked LIVE_READ and stacksafe() check will perform byte-by-byte verification. If all bytes in the range were written the slot will be marked as LIVE_WRITTEN. This significantly speeds up state equality comparison and reduces total number of states processed. 
before after bpf_lb-DLB_L3.o 2051 2003 bpf_lb-DLB_L4.o 3287 3164 bpf_lb-DUNKNOWN.o 1080 1080 bpf_lxc-DDROP_ALL.o 24980 12361 bpf_lxc-DUNKNOWN.o 34308 16605 bpf_netdev.o 15404 10962 bpf_overlay.o 7191 6679 Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1f23408024ee..585d4e17ea88 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -91,7 +91,8 @@ struct bpf_reg_state { enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ STACK_SPILL, /* register spilled into stack */ - STACK_MISC /* BPF program wrote some data into this slot */ + STACK_MISC, /* BPF program wrote some data into this slot */ + STACK_ZERO, /* BPF program wrote constant zero */ }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ -- cgit v1.2.3 From 1ea47e01ad6ea0fe99697c54c2413d81dd21fe32 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:13 -0800 Subject: bpf: add support for bpf_call to interpreter though bpf_call is still the same call instruction and calling convention 'bpf to bpf' and 'bpf to helper' is the same the interpreter has to oparate on 'struct bpf_insn *'. To distinguish these two cases add a kernel internal opcode and mark call insns with it. This opcode is seen by interpreter only. JITs will never see it. Also add tiny bit of debug code to aid interpreter debugging. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 + include/linux/filter.h | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 54dc7cae2949..8935f6f63d5f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -402,6 +402,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); diff --git a/include/linux/filter.h b/include/linux/filter.h index 5feb441d3dd9..f26e6da1007b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -58,6 +58,9 @@ struct bpf_prog_aux; /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 +/* unused opcode to mark call to interpreter with arguments */ +#define BPF_CALL_ARGS 0xe0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. 
@@ -710,6 +713,9 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#define __bpf_call_base_args \ + ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ + __bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); -- cgit v1.2.3 From 60b58afc96c9df71871df2dbad42037757ceef26 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:14 -0800 Subject: bpf: fix net.core.bpf_jit_enable race global bpf_jit_enable variable is tested multiple times in JITs, blinding and verifier core. The malicious root can try to toggle it while loading the programs. This race condition was accounted for and there should be no issues, but it's safer to avoid this race condition. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/filter.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index f26e6da1007b..3d6edc34932c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -458,6 +458,7 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ @@ -804,7 +805,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) return fp->jited && bpf_jit_is_ebpf(); } -static inline bool bpf_jit_blinding_enabled(void) +static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { /* These are the prerequisites, should someone ever have the * idea to call blinding outside of them, we make sure to @@ -812,7 +813,7 @@ static inline bool bpf_jit_blinding_enabled(void) */ if (!bpf_jit_is_ebpf()) return false; - if (!bpf_jit_enable) + if (!prog->jit_requested) return false; if (!bpf_jit_harden) return false; -- cgit v1.2.3 From 1c2a088a6626d4f51d2f2c97b0cbedbfbf3637f6 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 14 Dec 2017 17:55:15 -0800 Subject: bpf: x64: add JIT support for multi-function programs Typical JIT does several passes over bpf instructions to compute total size and relative offsets of jumps and calls. With multitple bpf functions calling each other all relative calls will have invalid offsets intially therefore we need to additional last pass over the program to emit calls with correct offsets. For example in case of three bpf functions: main: call foo call bpf_map_lookup exit foo: call bar exit bar: exit We will call bpf_int_jit_compile() indepedently for main(), foo() and bar() x64 JIT typically does 4-5 passes to converge. After these initial passes the image for these 3 functions will be good except call targets, since start addresses of foo() and bar() are unknown when we were JITing main() (note that call bpf_map_lookup will be resolved properly during initial passes). Once start addresses of 3 functions are known we patch call_insn->imm to point to right functions and call bpf_int_jit_compile() again which needs only one pass. Additional safety checks are done to make sure this last pass doesn't produce image that is larger or smaller than previous pass. 
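For illustration, once every function has been JITed once and its image address is known, the last pass reduces to a fixup loop over the call instructions before the JIT is invoked one final time. A simplified sketch of that loop (it assumes the verifier has temporarily stashed each callee's subprogram index in the call insn's off field, and leaves out all error handling):

	for (i = 0; i < func_cnt; i++) {
		struct bpf_insn *insn = func[i]->insnsi;
		int k;

		for (k = 0; k < func[i]->len; k++, insn++) {
			int callee;

			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			callee = insn->off;	/* stashed subprog index */
			insn->off = 0;
			/* express the callee image relative to __bpf_call_base
			 * so the final JIT pass emits correct call offsets
			 */
			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
				    func[callee]->bpf_func - __bpf_call_base;
		}
	}
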
When constant blinding is on it's applied to all functions at the first pass, since doing it once again at the last pass can change size of the JITed code. Tested on x64 and arm64 hw with JIT on/off, blinding on/off. x64 jits bpf-to-bpf calls correctly while arm64 falls back to interpreter. All other JITs that support normal BPF_CALL will behave the same way since bpf-to-bpf call is equivalent to bpf-to-kernel call from JITs point of view. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 3 +++ include/linux/bpf_verifier.h | 1 + include/linux/filter.h | 2 ++ 3 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8935f6f63d5f..da54ef644fcd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -200,6 +200,9 @@ struct bpf_prog_aux { u32 max_ctx_offset; u32 stack_depth; u32 id; + u32 func_cnt; + struct bpf_prog **func; + void *jit_data; /* JIT specific data. arch dependent */ struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_prog_ops *ops; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 585d4e17ea88..aaac589e490c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -143,6 +143,7 @@ struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ + s32 call_imm; /* saved imm field of call insn */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ bool seen; /* this insn was processed by the verifier */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 3d6edc34932c..e872b4ebaa57 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -463,6 +463,8 @@ struct bpf_prog { gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ kprobe_override:1; /* Do we override a kprobe? */ enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ -- cgit v1.2.3 From fb1f5f79ae96331a0201b4080d34f3bc3b5c0b1d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sat, 16 Dec 2017 03:09:40 -0500 Subject: net: Introduce NETIF_F_GRO_HW. Introduce NETIF_F_GRO_HW feature flag for NICs that support hardware GRO. With this flag, we can now independently turn on or off hardware GRO when GRO is on. Previously, drivers were using NETIF_F_GRO to control hardware GRO and so it cannot be independently turned on or off without affecting GRO. Hardware GRO (just like GRO) guarantees that packets can be re-segmented by TSO/GSO to reconstruct the original packet stream. Logically, GRO_HW should depend on GRO since it a subset, but we will let individual drivers enforce this dependency as they see fit. Since NETIF_F_GRO is not propagated between upper and lower devices, NETIF_F_GRO_HW should follow suit since it is a subset of GRO. In other words, a lower device can independent have GRO/GRO_HW enabled or disabled and no feature propagation is required. This will preserve the current GRO behavior. This can be changed later if we decide to propagate GRO/ GRO_HW/RXCSUM from upper to lower devices. Cc: Ariel Elior Cc: everest-linux-l2@cavium.com Signed-off-by: Michael Chan Acked-by: Alexander Duyck Signed-off-by: David S. 
Miller --- include/linux/netdev_features.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index b1b0ca7ccb2b..db84c516bcfb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -78,6 +78,8 @@ enum { NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ + NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ + /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe @@ -97,6 +99,7 @@ enum { #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) #define NETIF_F_FSO __NETIF_F(FSO) #define NETIF_F_GRO __NETIF_F(GRO) +#define NETIF_F_GRO_HW __NETIF_F(GRO_HW) #define NETIF_F_GSO __NETIF_F(GSO) #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) -- cgit v1.2.3 From f53c723902d1ac5f0b0a11d7c9dcbff748dde74e Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 20 Dec 2017 10:41:36 +0100 Subject: net: Add asynchronous callbacks for xfrm on layer 2. This patch implements asynchronous crypto callbacks and a backlog handler that can be used when IPsec is done at layer 2 in the TX path. It also extends the skb validate functions so that we can update the driver transmit return codes based on async crypto operation or to indicate that we queued the packet in a backlog queue. Joint work with: Aviv Heller Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cc4ce7456e38..c82d207ebc97 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2793,7 +2793,9 @@ struct softnet_data { struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; - +#ifdef CONFIG_XFRM_OFFLOAD + struct sk_buff_head xfrm_backlog; +#endif #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. @@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); -- cgit v1.2.3 From 7105e828c087de970fcb5a9509db51bfe6bd7894 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 20 Dec 2017 13:42:57 +0100 Subject: bpf: allow for correlation of maps and helpers in dump Currently a dump of an xlated prog (post verifier stage) doesn't correlate used helpers as well as maps. The prog info lists involved map ids, however there's no correlation of where in the program they are used as of today. Likewise, bpftool does not correlate helper calls with the target functions. The latter can be done w/o any kernel changes through kallsyms, and also has the advantage that this works with inlined helpers and BPF calls. 
Example, via interpreter: # tc filter show dev foo ingress filter protocol all pref 49152 bpf chain 0 filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \ direct-action not_in_hw id 1 tag c74773051b364165 <-- prog id:1 * Output before patch (calls/maps remain unclear): # bpftool prog dump xlated id 1 <-- dump prog id:1 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = 0xffff95c47a8d4800 6: (85) call unknown#73040 7: (15) if r0 == 0x0 goto pc+18 8: (bf) r2 = r10 9: (07) r2 += -4 10: (bf) r1 = r0 11: (85) call unknown#73040 12: (15) if r0 == 0x0 goto pc+23 [...] * Output after patch: # bpftool prog dump xlated id 1 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] <-- map id:2 6: (85) call bpf_map_lookup_elem#73424 <-- helper call 7: (15) if r0 == 0x0 goto pc+18 8: (bf) r2 = r10 9: (07) r2 += -4 10: (bf) r1 = r0 11: (85) call bpf_map_lookup_elem#73424 12: (15) if r0 == 0x0 goto pc+23 [...] # bpftool map show id 2 <-- show/dump/etc map id:2 2: hash_of_maps flags 0x0 key 4B value 4B max_entries 3 memlock 4096B Example, JITed, same prog: # tc filter show dev foo ingress filter protocol all pref 49152 bpf chain 0 filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \ direct-action not_in_hw id 3 tag c74773051b364165 jited # bpftool prog show id 3 3: sched_cls tag c74773051b364165 loaded_at Dec 19/13:48 uid 0 xlated 384B jited 257B memlock 4096B map_ids 2 # bpftool prog dump xlated id 3 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] <-- map id:2 6: (85) call __htab_map_lookup_elem#77408 <-+ inlined rewrite 7: (15) if r0 == 0x0 goto pc+2 | 8: (07) r0 += 56 | 9: (79) r0 = *(u64 *)(r0 +0) <-+ 10: (15) if r0 == 0x0 goto pc+24 11: (bf) r2 = r10 12: (07) r2 += -4 [...] Example, same prog, but kallsyms disabled (in that case we are also not allowed to pass any relative offsets, etc, so prog becomes pointer sanitized on dump): # sysctl kernel.kptr_restrict=2 kernel.kptr_restrict = 2 # bpftool prog dump xlated id 3 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] 6: (85) call bpf_unspec#0 7: (15) if r0 == 0x0 goto pc+2 [...] Example, BPF calls via interpreter: # bpftool prog dump xlated id 1 0: (85) call pc+2#__bpf_prog_run_args32 1: (b7) r0 = 1 2: (95) exit 3: (b7) r0 = 2 4: (95) exit Example, BPF calls via JIT: # sysctl net.core.bpf_jit_enable=1 net.core.bpf_jit_enable = 1 # sysctl net.core.bpf_jit_kallsyms=1 net.core.bpf_jit_kallsyms = 1 # bpftool prog dump xlated id 1 0: (85) call pc+2#bpf_prog_3b185187f1855c4c_F 1: (b7) r0 = 1 2: (95) exit 3: (b7) r0 = 2 4: (95) exit And finally, an example for tail calls that is now working as well wrt correlation: # bpftool prog dump xlated id 2 [...] 
10: (b7) r2 = 8 11: (85) call bpf_trace_printk#-41312 12: (bf) r1 = r6 13: (18) r2 = map[id:1] 15: (b7) r3 = 0 16: (85) call bpf_tail_call#12 17: (b7) r1 = 42 18: (6b) *(u16 *)(r6 +46) = r1 19: (b7) r0 = 0 20: (95) exit # bpftool map show id 1 1: prog_array flags 0x0 key 4B value 4B max_entries 1 memlock 4096B Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index e872b4ebaa57..2b0df2703671 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -724,6 +725,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); +static inline bool bpf_dump_raw_ok(void) +{ + /* Reconstruction of call-sites is dependent on kallsyms, + * thus make dump the same restriction. + */ + return kallsyms_show_value() == 1; +} + struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); -- cgit v1.2.3 From 9cb0d21d01b974761735a7f5345d632f967e625a Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 19 Dec 2017 15:35:49 -0800 Subject: xfrm: wrap xfrmdev_ops with offload config There's no reason to define netdev->xfrmdev_ops if the offload facility is not CONFIG'd in. Signed-off-by: Shannon Nelson Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c82d207ebc97..352066e4eeef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1726,7 +1726,7 @@ struct net_device { const struct ndisc_ops *ndisc_ops; #endif -#ifdef CONFIG_XFRM +#ifdef CONFIG_XFRM_OFFLOAD const struct xfrmdev_ops *xfrmdev_ops; #endif -- cgit v1.2.3 From 66364bdf3a7e0470e84590d48fc11d902eca7fa9 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 21 Dec 2017 11:40:04 +0200 Subject: rtnetlink: Replace implementation of ASSERT_RTNL() macro with WARN_ONCE() ASSERT_RTNL() macro is actual open-coded variant of WARN_ONCE() with two exceptions. First, it prints stack for multiple hits and not only once as WARN_ONCE() does. Second, the user can disable prints of WARN_ONCE by setting CONFIG_BUG to N. The multiple prints of dump stack are actually not needed, because calls without rtnl lock are programming errors and user can't do anything about them except to complain to the mailing list after first occurrence of such failure. The user who disabled BUG/WARN prints did it explicitly because by default in upstream kernel and distributions this option is enabled. It means that user doesn't want to see prints about missing locks too. This patch replaces open-coded variant in favor of already existing macro and change error prints to be once only. Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- include/linux/rtnetlink.h | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2032ce2eb20b..62d508b31f56 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -97,13 +97,9 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() do { \ - if (unlikely(!rtnl_is_locked())) { \ - printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ - __FILE__, __LINE__); \ - dump_stack(); \ - } \ -} while(0) +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, -- cgit v1.2.3 From 04f629f730fcd30c811777d186b15c38737eaa3c Mon Sep 17 00:00:00 2001 From: Richard Leitner Date: Fri, 22 Dec 2017 11:08:09 +0100 Subject: phylib: rename reset-(post-)delay-us to reset-(de)assert-us As suggested by Rob Herring [1] rename the previously introduced reset-{,post-}delay-us bindings to the clearer reset-{,de}assert-us [1] https://patchwork.kernel.org/patch/10104905/ Signed-off-by: Richard Leitner Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- include/linux/mdio.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index e37c21d8eb19..268aad47ecd3 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -41,8 +41,8 @@ struct mdio_device { int addr; int flags; struct gpio_desc *reset; - unsigned int reset_delay; - unsigned int reset_post_delay; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; }; #define to_mdio_device(d) container_of(d, struct mdio_device, dev) -- cgit v1.2.3 From 70a87ffea8acc390ae315fa1930e849f656bdb88 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 25 Dec 2017 13:15:40 -0800 Subject: bpf: fix maximum stack depth tracking logic Instead of computing max stack depth for current call chain during the main verifier pass track stack depth of each function independently and after do_check() is done do another pass over all instructions analyzing depth of all possible call stacks. Fixes: f4d7e40a5b71 ("bpf: introduce function calls (verification)") Reported-by: Jann Horn Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index aaac589e490c..94a02ceb1246 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -194,6 +194,7 @@ struct bpf_verifier_env { struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ struct bpf_verifer_log log; u32 subprog_starts[BPF_MAX_SUBPROGS]; + /* computes the stack depth of each bpf function */ u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; }; -- cgit v1.2.3 From 9b93ab981e3bf62ff95a8cbb6faf652cd400decd Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 28 Nov 2017 11:58:51 +0200 Subject: net/mlx5: Separate ingress/egress namespaces for each vport Each vport has its own root flow table for the ACL flow tables and root flow table is per namespace, therefore we should create a namespace for each vport. 
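For illustration, a hedged usage sketch of the new per-vport lookup that the diff below adds (the namespace type names and the vport_num variable are assumptions of this sketch, not taken from the patch):

	struct mlx5_flow_namespace *ingress, *egress;

	ingress = mlx5_get_flow_vport_acl_namespace(dev,
						    MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport_num);
	egress  = mlx5_get_flow_vport_acl_namespace(dev,
						    MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport_num);
	if (!ingress || !egress)
		return -EOPNOTSUPP;
	/* the vport's ACL flow tables are then created inside its own
	 * ingress/egress namespaces instead of one shared root table
	 */
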
Fixes: efdc810ba39d ("net/mlx5: Flow steering, Add vport ACL support") Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/fs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b25e7baa273e..a0b48afcb422 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -95,6 +95,10 @@ struct mlx5_flow_destination { struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); +struct mlx5_flow_namespace * +mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport); struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, -- cgit v1.2.3 From 9a18eedb145d080d542766af1d7513ebfccd1604 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 27 Dec 2017 18:39:04 -0800 Subject: bpf: offload: don't use prog->aux->offload as boolean We currently use aux->offload to indicate that program is bound to a specific device. This forces us to keep the offload structure around even after the device is gone. Add a bool member to struct bpf_prog_aux to indicate if offload was requested. Suggested-by: Alexei Starovoitov Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index da54ef644fcd..838eee10e979 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -201,6 +201,7 @@ struct bpf_prog_aux { u32 stack_depth; u32 id; u32 func_cnt; + bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ struct latch_tree_node ksym_tnode; @@ -529,7 +530,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { - return aux->offload; + return aux->offload_requested; } #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, -- cgit v1.2.3 From cae1927c0b4a93ae15de824faca1f6f611a44fcd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 27 Dec 2017 18:39:05 -0800 Subject: bpf: offload: allow netdev to disappear while verifier is running To allow verifier instruction callbacks without any extra locking NETDEV_UNREGISTER notification would wait on a waitqueue for verifier to finish. This design decision was made when rtnl lock was providing all the locking. Use the read/write lock instead and remove the workqueue. Verifier will now call into the offload code, so dev_ops are moved to offload structure. Since verifier calls are all under bpf_prog_is_dev_bound() we no longer need static inline implementations to please builds with CONFIG_NET=n. 
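For illustration, the driver side of the reworked interface stays small; a sketch of what an offloading driver provides under the structure added below (the foodrv names are made up; in-tree, nfp is the real user):

	static int foodrv_bpf_verify_insn(struct bpf_verifier_env *env,
					  int insn_idx, int prev_insn_idx)
	{
		/* per-instruction, driver-specific verification checks */
		return 0;
	}

	static const struct bpf_prog_offload_ops foodrv_bpf_dev_ops = {
		.insn_hook = foodrv_bpf_verify_insn,
	};

	/* handed back from the driver's BPF_OFFLOAD_VERIFIER_PREP handler: */
	bpf->verifier.ops = &foodrv_bpf_dev_ops;
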
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 9 +++++++-- include/linux/bpf_verifier.h | 16 ++-------------- include/linux/netdevice.h | 4 ++-- 3 files changed, 11 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 838eee10e979..669549f7e3e8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -17,6 +17,7 @@ #include #include +struct bpf_verifier_env; struct perf_event; struct bpf_prog; struct bpf_map; @@ -184,14 +185,18 @@ struct bpf_verifier_ops { struct bpf_prog *prog, u32 *target_size); }; +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); +}; + struct bpf_dev_offload { struct bpf_prog *prog; struct net_device *netdev; void *dev_priv; struct list_head offloads; bool dev_state; - bool verifier_running; - wait_queue_head_t verifier_done; + const struct bpf_prog_offload_ops *dev_ops; }; struct bpf_prog_aux { diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 883a35d50cd5..2feb218c001d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -166,12 +166,6 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) return log->len_used >= log->len_total - 1; } -struct bpf_verifier_env; -struct bpf_ext_analyzer_ops { - int (*insn_hook)(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); -}; - #define BPF_MAX_SUBPROGS 256 /* single container for all structs @@ -185,7 +179,6 @@ struct bpf_verifier_env { bool strict_alignment; /* perform strict pointer alignment checks */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ - const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ @@ -206,13 +199,8 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) return cur->frame[cur->curframe]->regs; } -#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); -#else -static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) -{ - return -EOPNOTSUPP; -} -#endif +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 352066e4eeef..49bfc6eec74c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -804,7 +804,7 @@ enum bpf_netdev_command { BPF_OFFLOAD_DESTROY, }; -struct bpf_ext_analyzer_ops; +struct bpf_prog_offload_ops; struct netlink_ext_ack; struct netdev_bpf { @@ -826,7 +826,7 @@ struct netdev_bpf { /* BPF_OFFLOAD_VERIFIER_PREP */ struct { struct bpf_prog *prog; - const struct bpf_ext_analyzer_ops *ops; /* callee set */ + const struct bpf_prog_offload_ops *ops; /* callee set */ } verifier; /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ struct { -- cgit v1.2.3 From ad8ad79f4f6078f456792f7f8d344da2be9bc74f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 27 Dec 2017 18:39:07 -0800 Subject: bpf: offload: free program id when device disappears Bound programs are quite useless after their device disappears. 
They are simply waiting for reference count to go to zero, don't list them in BPF_PROG_GET_NEXT_ID by freeing their ID early. Note that orphaned offload programs will return -ENODEV on BPF_OBJ_GET_INFO_BY_FD so user will never see ID 0. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 669549f7e3e8..9a916ab34299 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -357,6 +357,8 @@ void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); + struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); -- cgit v1.2.3 From cdab6ba8668d68f031bfd5d237b4586ec4f8cd88 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 27 Dec 2017 18:39:08 -0800 Subject: nsfs: generalize ns_get_path() for path resolution with a task ns_get_path() takes struct task_struct and proc_ns_ops as its parameters. For path resolution directly from a namespace, e.g. based on a networking device's net name space, we need more flexibility. Add a ns_get_path_cb() helper which will allow callers to use any method of obtaining the name space reference. Convert ns_get_path() to use ns_get_path_cb(). Following patches will bring a networking user. CC: Eric W. Biederman Suggested-by: Daniel Borkmann Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- include/linux/proc_ns.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 2ff18c9840a7..d31cb6215905 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd); #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) extern void *ns_get_path(struct path *path, struct task_struct *task, const struct proc_ns_operations *ns_ops); +typedef struct ns_common *ns_get_path_helper_t(void *); +extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, + void *private_data); extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); -- cgit v1.2.3 From 675fc275a3a2d905535207237402c6d8dcb5fa4b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 27 Dec 2017 18:39:09 -0800 Subject: bpf: offload: report device information for offloaded programs Report to the user ifindex and namespace information of offloaded programs. If device has disappeared return -ENODEV. Specify the namespace using dev/inode combination. CC: Eric W. 
Biederman Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9a916ab34299..7810ae57b357 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -531,6 +531,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, int bpf_prog_offload_compile(struct bpf_prog *prog); void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); -- cgit v1.2.3 From bcecb4bbf88aa03171c30652bca761cf27755a6b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 27 Dec 2017 19:50:25 -0800 Subject: net: ptr_ring: otherwise safe empty checks can overrun array bounds When running consumer and/or producer operations and empty checks in parallel its possible to have the empty check run past the end of the array. The scenario occurs when an empty check is run while __ptr_ring_discard_one() is in progress. Specifically after the consumer_head is incremented but before (consumer_head >= ring_size) check is made and the consumer head is zeroe'd. To resolve this, without having to rework how consumer/producer ops work on the array, simply add an extra dummy slot to the end of the array. Even if we did a rework to avoid the extra slot it looks like the normal case checks would suffer some so best to just allocate an extra pointer. Reported-by: Jakub Kicinski Fixes: c5ad119fb6c09 ("net: sched: pfifo_fast use skb_array") Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/ptr_ring.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6866df4f31b5..13fb06a103c6 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -447,7 +447,12 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - return kcalloc(size, sizeof(void *), gfp); + /* Allocate an extra dummy element at end of ring to avoid consumer head + * or produce head access past the end of the array. Possible when + * producer/consumer operations and __ptr_ring_peek operations run in + * parallel. + */ + return kcalloc(size + 1, sizeof(void *), gfp); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) -- cgit v1.2.3 From a2e7699eb50fda6450036129f7c0642b3349b879 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Wed, 27 Dec 2017 19:30:05 +0200 Subject: qed*: Refactoring and rearranging FW API with no functional impact This patch refactors and reorders the FW API files in preparation of upgrading the code to support new FW. - Make use of the BIT macro in appropriate places. - Whitespace changes to align values and code blocks. - Comments are updated (spelling mistakes, removed if not clear). - Group together code blocks which are related or deal with similar matters. Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: Tomer Tayar Signed-off-by: David S. 
Miller --- include/linux/qed/common_hsi.h | 1165 ++++++++++++++-------------- include/linux/qed/eth_common.h | 363 ++++----- include/linux/qed/fcoe_common.h | 874 ++++++++++----------- include/linux/qed/iscsi_common.h | 1462 +++++++++++++++++++----------------- include/linux/qed/iwarp_common.h | 17 +- include/linux/qed/qed_if.h | 20 +- include/linux/qed/rdma_common.h | 25 +- include/linux/qed/roce_common.h | 15 +- include/linux/qed/storage_common.h | 45 +- include/linux/qed/tcp_common.h | 129 ++-- 10 files changed, 2114 insertions(+), 2001 deletions(-) (limited to 'include/linux') diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 39e2a2ac2471..4874c104144b 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -32,14 +32,15 @@ #ifndef _COMMON_HSI_H #define _COMMON_HSI_H + #include #include #include #include /* dma_addr_t manip */ -#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) -#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ @@ -47,39 +48,45 @@ (x).lo = DMA_LO_LE((val)); \ } while (0) -#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) \ + HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) +#define HILO_64_REGPAIR(regpair) ({ \ + typeof(regpair) __regpair = (regpair); \ + HILO_64(__regpair.hi, __regpair.lo); }) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ +/********************************/ +/* PROTOCOL COMMON FW CONSTANTS */ +/********************************/ -#define X_FINAL_CLEANUP_AGG_INT 1 +#define X_FINAL_CLEANUP_AGG_INT 1 -#define EVENT_RING_PAGE_SIZE_BYTES 4096 +#define EVENT_RING_PAGE_SIZE_BYTES 4096 -#define NUM_OF_GLOBAL_QUEUES 128 -#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 -#define ISCSI_CDU_TASK_SEG_TYPE 0 -#define FCOE_CDU_TASK_SEG_TYPE 0 -#define RDMA_CDU_TASK_SEG_TYPE 1 +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 -#define FW_ASSERT_GENERAL_ATTN_IDX 32 +#define FW_ASSERT_GENERAL_ATTN_IDX 32 -#define MAX_PINNED_CCFC 32 +#define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ -#define TSTORM_QZONE_SIZE 8 -#define MSTORM_QZONE_SIZE 16 -#define USTORM_QZONE_SIZE 8 -#define XSTORM_QZONE_SIZE 8 -#define YSTORM_QZONE_SIZE 0 -#define PSTORM_QZONE_SIZE 0 - -#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 @@ -115,10 +122,10 @@ #define MAX_NUM_PORTS_BB (2) #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) -#define MAX_NUM_PFS_K2 (16) -#define MAX_NUM_PFS_BB (8) -#define MAX_NUM_PFS (MAX_NUM_PFS_K2) -#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ +#define 
MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ #define MAX_NUM_VFS_K2 (192) #define MAX_NUM_VFS_BB (120) @@ -147,9 +154,6 @@ #define LB_TC (NUM_OF_PHYS_TCS) -/* Num of possible traffic priority values */ -#define NUM_OF_PRIO (8) - #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) #define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) @@ -157,13 +161,8 @@ /* CIDs */ #define NUM_OF_CONNECTION_TYPES (8) -#define NUM_OF_LCIDS (320) -#define NUM_OF_LTIDS (320) - -/* Clock values */ -#define MASTER_CLK_FREQ_E4 (375e6) -#define STORM_CLK_FREQ_E4 (1000e6) -#define CLK25M_CLK_FREQ_E4 (25e6) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) /* Global PXP windows (GTT) */ #define NUM_OF_GTT 19 @@ -172,17 +171,17 @@ #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ -#define TOOLS_VERSION 10 +#define TOOLS_VERSION 10 /*****************/ /* CDU CONSTANTS */ /*****************/ -#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) -#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) @@ -201,45 +200,45 @@ #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 #define DQ_DEMS_ROCE_CQ_CONS 7 -/* XCM agg val selection */ -#define DQ_XCM_AGG_VAL_SEL_WORD2 0 -#define DQ_XCM_AGG_VAL_SEL_WORD3 1 -#define DQ_XCM_AGG_VAL_SEL_WORD4 2 -#define DQ_XCM_AGG_VAL_SEL_WORD5 3 -#define DQ_XCM_AGG_VAL_SEL_REG3 4 -#define DQ_XCM_AGG_VAL_SEL_REG4 5 -#define DQ_XCM_AGG_VAL_SEL_REG5 6 -#define DQ_XCM_AGG_VAL_SEL_REG6 7 - -/* XCM agg val selection */ -#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 -#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 +/* XCM agg val selection (HW) */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection (FW) */ +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 #define DQ_UCM_AGG_VAL_SEL_WORD1 1 #define DQ_UCM_AGG_VAL_SEL_WORD2 2 #define DQ_UCM_AGG_VAL_SEL_WORD3 3 -#define DQ_UCM_AGG_VAL_SEL_REG0 4 -#define DQ_UCM_AGG_VAL_SEL_REG1 5 -#define DQ_UCM_AGG_VAL_SEL_REG2 6 -#define DQ_UCM_AGG_VAL_SEL_REG3 7 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 /* UCM agg val selection (FW) */ #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 @@ -263,7 +262,7 @@ #define DQ_TCM_ROCE_RQ_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD0 -/* XCM agg counter flag selection */ +/* XCM agg counter flag selection (HW) */ #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 @@ -273,20 +272,20 @@ #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 -/* XCM agg counter flag selection */ -#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +/* XCM agg counter flag selection (FW) */ +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define 
DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -317,9 +316,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ -#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) -#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) -#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) @@ -327,18 +326,18 @@ #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ -#define DQ_PWM_OFFSET_DPM_BASE 0x0 -#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 #define DQ_PWM_OFFSET_UCM32_BASE 0x4C -#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_UCM16_4 0x50 #define DQ_PWM_OFFSET_TCM16_BASE 0x58 #define DQ_PWM_OFFSET_TCM32_BASE 0x5C -#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 -#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 -#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) @@ -347,10 +346,11 @@ #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) -#define DQ_REGION_SHIFT (12) + +#define DQ_REGION_SHIFT (12) /* DPM */ -#define DQ_DPM_WQE_BUFF_SIZE (320) +#define DQ_DPM_WQE_BUFF_SIZE (320) /* Conn type ranges */ #define DQ_CONN_TYPE_RANGE_SHIFT (4) @@ -359,29 +359,30 @@ /* QM CONSTANTS */ /*****************/ -/* number of TX queues in the QM */ +/* Number of TX queues in the QM */ #define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_BB 448 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 -/* number of Other queues in the QM */ +/* Number of Other queues in the QM */ #define MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 -/* number of queues in a PF queue group */ +/* Number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 -/* the size of a single queue element in bytes */ -#define QM_PQ_ELEMENT_SIZE 4 +/* The size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 -/* base number of Tx PQs in the CM PQ representation. - * should be used when storing PQ IDs in CM PQ registers and context +/* Base number of Tx PQs in the CM PQ representation. + * Should be used when storing PQ IDs in CM PQ registers and context. 
*/ -#define CM_TX_PQ_BASE 0x200 +#define CM_TX_PQ_BASE 0x200 -/* number of global Vport/QCN rate limiters */ +/* Number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 + /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) @@ -432,8 +433,7 @@ #define IGU_CMD_INT_ACK_BASE 0x0400 #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 @@ -447,8 +447,7 @@ #define IGU_CMD_PROD_UPD_BASE 0x0600 #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff /*****************/ @@ -514,129 +513,121 @@ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) /* PF BAR */ -#define PXP_BAR0_START_GRC 0x0000 -#define PXP_BAR0_GRC_LENGTH 0x1C00000 -#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ - PXP_BAR0_GRC_LENGTH - 1) - -#define PXP_BAR0_START_IGU 0x1C00000 -#define PXP_BAR0_IGU_LENGTH 0x10000 -#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ - PXP_BAR0_IGU_LENGTH - 1) - -#define PXP_BAR0_START_TSDM 0x1C80000 -#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 -#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_MSDM 0x1D00000 -#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_USDM 0x1D80000 -#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_XSDM 0x1E00000 -#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_YSDM 0x1E80000 -#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_PSDM 0x1F00000 -#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) /* VF BAR */ -#define PXP_VF_BAR0 0 - -#define PXP_VF_BAR0_START_GRC 0x3E00 -#define PXP_VF_BAR0_GRC_LENGTH 0x200 -#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ - PXP_VF_BAR0_GRC_LENGTH - 1) - -#define PXP_VF_BAR0_START_IGU 0 -#define PXP_VF_BAR0_IGU_LENGTH 0x3000 -#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ - PXP_VF_BAR0_IGU_LENGTH - 1) - -#define PXP_VF_BAR0_START_DQ 0x3000 -#define PXP_VF_BAR0_DQ_LENGTH 0x200 -#define 
PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 -#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_OPAQUE_OFFSET) -#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ - + 4) -#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_LENGTH - 1) - -#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 -#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 -#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 -#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 -#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 -#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 -#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 -#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 -#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 - -#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 - -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define 
PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) -#define PXP_QUEUES_ZONE_MAX_NUM 320 + +/* Host Interface */ +#define PXP_QUEUES_ZONE_MAX_NUM 320 + /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 +#define PRM_DMA_PAD_BYTES_NUM 2 + /*****************/ /* SDMs CONSTANTS */ /*****************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 @@ -644,26 +635,26 @@ /* Completion types */ /********************/ -#define SDM_COMP_TYPE_NONE 0 -#define SDM_COMP_TYPE_WAKE_THREAD 1 -#define SDM_COMP_TYPE_AGG_INT 2 -#define SDM_COMP_TYPE_CM 3 -#define SDM_COMP_TYPE_LOADER 4 -#define SDM_COMP_TYPE_PXP 5 -#define SDM_COMP_TYPE_INDICATE_ERROR 6 -#define SDM_COMP_TYPE_RELEASE_THREAD 7 -#define SDM_COMP_TYPE_RAM 8 -#define SDM_COMP_TYPE_INC_ORDER_CNT 9 +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /*****************/ -/* PBF Constants */ +/* PBF CONSTANTS */ /*****************/ /* Number of PBF command queue lines. Each line is 32B. */ -#define PBF_MAX_CMD_LINES 3328 +#define PBF_MAX_CMD_LINES 3328 /* Number of BTB blocks. Each block is 256B. */ -#define BTB_MAX_BLOCKS 1440 +#define BTB_MAX_BLOCKS 1440 /*****************/ /* PRS CONSTANTS */ @@ -679,6 +670,7 @@ struct async_data { u8 fw_debug_param; }; +/* Interrupt coalescing TimeSet */ struct coalescing_timeset { u8 value; #define COALESCING_TIMESET_TIMESET_MASK 0x7F @@ -692,20 +684,12 @@ struct common_queue_zone { __le16 reserved; }; +/* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; __le16 cqe_prod; }; -struct regpair { - __le32 lo; - __le32 hi; -}; - -struct vf_pf_channel_eqe_data { - struct regpair msg_addr; -}; - struct iscsi_eqe_data { __le32 cid; __le16 conn_id; @@ -719,52 +703,6 @@ struct iscsi_eqe_data { #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; -struct rdma_eqe_destroy_qp { - __le32 cid; - u8 reserved[4]; -}; - -union rdma_eqe_data { - struct regpair async_handle; - struct rdma_eqe_destroy_qp rdma_destroy_qp_data; -}; - -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - -struct initial_cleanup_eqe_data { - u8 vf_id; - u8 reserved[7]; -}; - -/* Event Data Union */ -union event_ring_data { - u8 bytes[8]; - struct vf_pf_channel_eqe_data vf_pf_channel; - struct iscsi_eqe_data iscsi_info; - union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; - struct initial_cleanup_eqe_data vf_init_cleanup; -}; - -/* Event Ring Entry */ -struct event_ring_entry { - u8 protocol_id; - u8 opcode; - __le16 reserved0; - __le16 echo; - u8 fw_return_code; - u8 flags; -#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 -#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 -#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F -#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 - union event_ring_data data; -}; - /* Multi function mode */ enum mf_mode { ERROR_MODE /* Unsupported mode */, @@ -781,13 +719,31 @@ enum protocol_type { 
PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_IWARP, - PROTOCOLID_RESERVED5, + PROTOCOLID_RESERVED0, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, - PROTOCOLID_RESERVED6, + PROTOCOLID_RESERVED1, MAX_PROTOCOL_TYPE }; +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* RoCE Destroy Event Data */ +struct rdma_eqe_destroy_qp { + __le32 cid; + u8 reserved[4]; +}; + +/* RDMA Event Data Union */ +union rdma_eqe_data { + struct regpair async_handle; + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; +}; + +/* Ustorm Queue Zone */ struct ustorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[3]; @@ -798,62 +754,71 @@ struct ustorm_queue_zone { struct common_queue_zone common; }; -/* status block structure */ +/* Status block structure */ struct cau_pi_entry { u32 prod; -#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF -#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 -#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F -#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 -#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 -#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 -#define CAU_PI_ENTRY_RESERVED_MASK 0xFF -#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 }; -/* status block structure */ +/* Status block structure */ struct cau_sb_entry { u32 data; -#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF -#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 -#define CAU_SB_ENTRY_STATE0_MASK 0xF -#define CAU_SB_ENTRY_STATE0_SHIFT 24 -#define CAU_SB_ENTRY_STATE1_MASK 0xF -#define CAU_SB_ENTRY_STATE1_SHIFT 28 +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 u32 params; -#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 -#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 -#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 -#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 -#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF -#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 -#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 -#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 -#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF -#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 -#define CAU_SB_ENTRY_TPH_MASK 0x1 -#define CAU_SB_ENTRY_TPH_SHIFT 31 +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* Igu cleanup bit values to distinguish between clean or producer consumer + * update. 
+ */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT }; -/* core doorbell data */ +/* Core doorbell data */ struct core_db_data { u8 params; -#define CORE_DB_DATA_DEST_MASK 0x3 -#define CORE_DB_DATA_DEST_SHIFT 0 -#define CORE_DB_DATA_AGG_CMD_MASK 0x3 -#define CORE_DB_DATA_AGG_CMD_SHIFT 2 -#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 -#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 -#define CORE_DB_DATA_RESERVED_MASK 0x1 -#define CORE_DB_DATA_RESERVED_SHIFT 5 -#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 spq_prod; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; }; /* Enum of doorbell aggregative command selection */ @@ -909,67 +874,69 @@ struct db_l2_dpm_sge { struct regpair addr; __le16 nbytes; __le16 bitfields; -#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF -#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 -#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 -#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 -#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 -#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 -#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF -#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 __le32 reserved2; }; /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; -#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 -#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 -#define DB_LEGACY_ADDR_DEMS_MASK 0x7 -#define DB_LEGACY_ADDR_DEMS_SHIFT 2 -#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF -#define DB_LEGACY_ADDR_ICID_SHIFT 5 +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 }; /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; #define DB_PWM_ADDR_RESERVED0_MASK 0x7 -#define DB_PWM_ADDR_RESERVED0_SHIFT 0 -#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F #define DB_PWM_ADDR_OFFSET_SHIFT 3 -#define DB_PWM_ADDR_WID_MASK 0x3 -#define DB_PWM_ADDR_WID_SHIFT 10 -#define DB_PWM_ADDR_DPI_MASK 0xFFFF -#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 #define DB_PWM_ADDR_RESERVED1_MASK 0xF -#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; -/* Parameters to RoCE firmware, passed in EDPM doorbell */ +/* Parameters to RDMA firmware, passed in EDPM doorbell */ struct db_rdma_dpm_params { __le32 params; -#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define 
DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; -/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ +/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a + * DPM burst. + */ struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; @@ -988,20 +955,20 @@ enum igu_int_cmd { /* IGU producer or consumer update command */ struct igu_prod_cons_update { u32 sb_id_and_flags; -#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF -#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 -#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 u32 reserved1; }; @@ -1014,36 +981,37 @@ enum igu_seg_access { struct parsing_and_err_flags { __le16 flags; -#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +/* Parsing error flags bitmap */ struct parsing_err_flags { __le16 flags; #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 @@ -1080,168 +1048,160 @@ struct parsing_err_flags { #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 }; +/* Pb context */ struct pb_context { __le32 crc[4]; }; +/* Concrete Function ID */ struct pxp_concrete_fid { __le16 fid; -#define PXP_CONCRETE_FID_PFID_MASK 0xF -#define PXP_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_CONCRETE_FID_PORT_MASK 0x3 -#define PXP_CONCRETE_FID_PORT_SHIFT 4 -#define PXP_CONCRETE_FID_PATH_MASK 0x1 -#define PXP_CONCRETE_FID_PATH_SHIFT 6 -#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_CONCRETE_FID_VFID_MASK 0xFF -#define 
PXP_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 }; +/* Concrete Function ID */ struct pxp_pretend_concrete_fid { __le16 fid; -#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF -#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 }; +/* Function ID */ union pxp_pretend_fid { struct pxp_pretend_concrete_fid concrete_fid; - __le16 opaque_fid; + __le16 opaque_fid; }; -/* Pxp Pretend Command Register. */ +/* Pxp Pretend Command Register */ struct pxp_pretend_cmd { - union pxp_pretend_fid fid; - __le16 control; -#define PXP_PRETEND_CMD_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PATH_SHIFT 0 -#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 -#define PXP_PRETEND_CMD_PORT_MASK 0x3 -#define PXP_PRETEND_CMD_PORT_SHIFT 2 -#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 -#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 -#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 -#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 -#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 -#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 }; -/* PTT Record in PXP Admin Window. 
*/ +/* PTT Record in PXP Admin Window */ struct pxp_ptt_entry { - __le32 offset; -#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF -#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 -#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF -#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 - struct pxp_pretend_cmd pretend; + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; }; -/* VF Zone A Permission Register. */ +/* VF Zone A Permission Register */ struct pxp_vf_zone_a_permission { __le32 control; -#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF -#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 -#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 -#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 }; -/* RSS hash type */ +/* Rdif context */ struct rdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; u8 flags0; -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 u8 partial_dif_data[7]; __le16 partial_crc_value; __le16 partial_checksum_value; __le32 offset_in_io; __le16 flags1; -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define 
RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 -#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 __le16 state; -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 -#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 -#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 -#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 -#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 __le32 
reserved2; }; -/* RSS hash type */ -enum rss_hash_type { - RSS_HASH_TYPE_DEFAULT = 0, - RSS_HASH_TYPE_IPV4 = 1, - RSS_HASH_TYPE_TCP_IPV4 = 2, - RSS_HASH_TYPE_IPV6 = 3, - RSS_HASH_TYPE_TCP_IPV6 = 4, - RSS_HASH_TYPE_UDP_IPV4 = 5, - RSS_HASH_TYPE_UDP_IPV6 = 6, - MAX_RSS_HASH_TYPE -}; - -/* status block structure */ +/* Status block structure */ struct status_block { __le16 pi_array[PIS_PER_SB]; __le32 sb_num; @@ -1258,88 +1218,90 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; +/* Tdif context */ struct tdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; - __le16 partial_crc_valueB; - __le16 partial_checksum_valueB; + __le16 partial_crc_value_b; + __le16 partial_checksum_value_b; __le16 stateB; -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 -#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F -#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 u8 reserved1; u8 flags0; -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 __le32 flags1; -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define 
TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 -#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 -#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 -#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 - __le32 offset_in_iob; +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 
0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_io_b; __le16 partial_crc_value_a; - __le16 partial_checksum_valuea_; - __le32 offset_in_ioa; + __le16 partial_checksum_value_a; + __le32 offset_in_io_a; u8 partial_dif_data_a[8]; u8 partial_dif_data_b[8]; }; +/* Timers context */ struct timers_context { __le32 logical_client_0; #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF @@ -1385,6 +1347,7 @@ struct timers_context { #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; +/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ enum tunnel_next_protocol { e_unknown = 0, e_l2 = 1, diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cb06e6e368e1..f554f4b58dfe 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -36,139 +36,140 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ -#define ETH_HSI_VER_MAJOR 3 -#define ETH_HSI_VER_MINOR 10 + +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 10 #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 -#define ETH_CACHE_LINE_SIZE 64 -#define ETH_RX_CQE_GAP 32 -#define ETH_MAX_RAMROD_PER_CON 8 -#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 -#define ETH_RX_NUM_NEXT_PAGE_BDS 2 - -#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 -#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 - -#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 -#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 -#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 -#define ETH_TX_MAX_LSO_HDR_NBD 4 -#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 -#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 -#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 -#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 -#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) -#define ETH_TX_MAX_LSO_HDR_BYTES 510 -#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) -#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 -#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 -#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 -#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF - -#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF + +#define ETH_NUM_STATISTIC_COUNTERS 
MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 -#define ETH_RX_BD_THRESHOLD 12 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 -/* num of MAC/VLAN filters */ -#define ETH_NUM_MAC_FILTERS 512 -#define ETH_NUM_VLAN_FILTERS 512 +/* Num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 -/* approx. multicast constants */ -#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 -#define ETH_MULTICAST_MAC_BINS 256 -#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) +/* Approx. multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) -/* ethernet vport update constants */ -#define ETH_FILTER_RULES_COUNT 10 -#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 -#define ETH_RSS_KEY_SIZE_REGS 10 -#define ETH_RSS_ENGINE_NUM_K2 207 -#define ETH_RSS_ENGINE_NUM_BB 127 +/* Ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 /* TPA constants */ -#define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT -#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 -#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Control frame check constants */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 struct eth_tx_1st_bd_flags { u8 bitfields; -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 }; -/* The parsing information data fo rthe first tx bd of a given packet. 
*/
+/* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
-/* Firmware data for L2-EDPM packet.
*/ +/* Firmware data for L2-EDPM packet */ struct eth_edpm_fw_data { struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_2nd_bd data_2nd_bd; @@ -179,7 +180,7 @@ struct eth_fast_path_cqe_fw_debug { __le16 reserved2; }; -/* tunneling parsing flags */ +/* Tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 @@ -199,24 +200,24 @@ struct eth_tunnel_parsing_flags { /* PMD flow control bits */ struct eth_pmd_flow_flags { u8 flags; -#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 -#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F -#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 }; -/* Regular ETH Rx FP CQE. */ +/* Regular ETH Rx FP CQE */ struct eth_fast_path_rx_reg_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 __le16 pkt_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -231,7 +232,7 @@ struct eth_fast_path_rx_reg_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-continue ETH Rx FP CQE. */ +/* TPA-continue ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_cont_cqe { u8 type; u8 tpa_agg_index; @@ -243,7 +244,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-end ETH Rx FP CQE. */ +/* TPA-end ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_end_cqe { u8 type; u8 tpa_agg_index; @@ -259,16 +260,16 @@ struct eth_fast_path_rx_tpa_end_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-start ETH Rx FP CQE. 
*/ +/* TPA-start ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_start_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 __le16 seg_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -295,24 +296,24 @@ struct eth_rx_bd { struct regpair addr; }; -/* regular ETH Rx SP CQE */ +/* Regular ETH Rx SP CQE */ struct eth_slow_path_rx_cqe { - u8 type; - u8 ramrod_cmd_id; - u8 error_flag; - u8 reserved[25]; - __le16 echo; - u8 reserved1; + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; struct eth_pmd_flow_flags pmd_flags; }; -/* union for all ETH Rx CQE types */ +/* Union for all ETH Rx CQE types */ union eth_rx_cqe { - struct eth_fast_path_rx_reg_cqe fast_path_regular; - struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; - struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; - struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; - struct eth_slow_path_rx_cqe slow_path; + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path; }; /* ETH Rx CQE type */ @@ -339,7 +340,7 @@ enum eth_rx_tunn_type { MAX_ETH_RX_TUNN_TYPE }; -/* Aggregation end reason. */ +/* Aggregation end reason. */ enum eth_tpa_end_reason { ETH_AGG_END_UNUSED, ETH_AGG_END_SP_UPDATE, @@ -354,59 +355,59 @@ enum eth_tpa_end_reason { /* The first tx bd of a given packet */ struct eth_tx_1st_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_1st_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; }; /* The second tx bd of a given packet */ struct eth_tx_2nd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_2nd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; }; -/* The parsing information data for the third tx bd of a given packet. 
*/ +/* The parsing information data for the third tx bd of a given packet */ struct eth_tx_data_3rd_bd { __le16 lso_mss; __le16 bitfields; -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 -#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF -#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 -#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F -#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 u8 tunn_l4_hdr_start_offset_w; u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ struct eth_tx_3rd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_3rd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; }; -/* Complementary information for the regular tx bd of a given packet. */ +/* Complementary information for the regular tx bd of a given packet */ struct eth_tx_data_bd { - __le16 reserved0; - __le16 bitfields; -#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF -#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 -#define ETH_TX_DATA_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F -#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 __le16 reserved3; }; /* The common non-special TX BD ring element */ struct eth_tx_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { @@ -434,18 +435,30 @@ struct xstorm_eth_queue_zone { /* ETH doorbell data */ struct eth_db_data { u8 params; -#define ETH_DB_DATA_DEST_MASK 0x3 -#define ETH_DB_DATA_DEST_SHIFT 0 -#define ETH_DB_DATA_AGG_CMD_MASK 0x3 -#define ETH_DB_DATA_AGG_CMD_SHIFT 2 -#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 -#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 -#define ETH_DB_DATA_RESERVED_MASK 0x1 -#define ETH_DB_DATA_RESERVED_SHIFT 5 -#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 bd_prod; }; +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + #endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 12fc9e788eea..e720a7b37b0b 100644 --- a/include/linux/qed/fcoe_common.h +++ 
b/include/linux/qed/fcoe_common.h @@ -8,217 +8,78 @@ #ifndef __FCOE_COMMON__ #define __FCOE_COMMON__ + /*********************/ /* FCOE FW CONSTANTS */ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -struct fcoe_abts_pkt { - __le32 abts_rsp_fc_payload_lo; - __le16 abts_rsp_rx_id; - u8 abts_rsp_rctl; - u8 reserved2; -}; - -/* FCoE additional WQE (Sq/XferQ) information */ -union fcoe_additional_info_union { - __le32 previous_tid; - __le32 parent_tid; - __le32 burst_length; - __le32 seq_rec_updated_offset; -}; - -struct fcoe_exp_ro { - __le32 data_offset; - __le32 reserved; -}; - -union fcoe_cleanup_addr_exp_ro_union { - struct regpair abts_rsp_fc_payload_hi; - struct fcoe_exp_ro exp_ro; -}; - -/* FCoE Ramrod Command IDs */ -enum fcoe_completion_status { - FCOE_COMPLETION_STATUS_SUCCESS, - FCOE_COMPLETION_STATUS_FCOE_VER_ERR, - FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, - MAX_FCOE_COMPLETION_STATUS -}; - -struct fc_addr_nw { - u8 addr_lo; - u8 addr_mid; - u8 addr_hi; -}; - -/* FCoE connection offload */ -struct fcoe_conn_offload_ramrod_data { - struct regpair sq_pbl_addr; - struct regpair sq_curr_page_addr; - struct regpair sq_next_page_addr; - struct regpair xferq_pbl_addr; - struct regpair xferq_curr_page_addr; - struct regpair xferq_next_page_addr; - struct regpair respq_pbl_addr; - struct regpair respq_curr_page_addr; - struct regpair respq_next_page_addr; - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; - __le16 tx_max_fc_pay_len; - __le16 e_d_tov_timer_val; - __le16 rx_max_fc_pay_len; - __le16 vlan_tag; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 - __le16 physical_q0; - __le16 rec_rr_tov_timer_val; - struct fc_addr_nw s_id; - u8 max_conc_seqs_c3; - struct fc_addr_nw d_id; - u8 flags; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 - __le16 conn_id; - u8 def_q_idx; - u8 reserved[5]; -}; - -/* FCoE terminate connection request */ -struct fcoe_conn_terminate_ramrod_data { - struct regpair terminate_params_addr; -}; - -struct fcoe_slow_sgl_ctx { - struct regpair base_sgl_addr; - __le16 curr_sge_off; - __le16 remainder_num_sges; - __le16 curr_sgl_index; - __le16 reserved; -}; - -union fcoe_dix_desc_ctx { - struct fcoe_slow_sgl_ctx dix_sgl; - struct scsi_sge cached_dix_sge; +/* The fcoe storm task context protection-information of Ystorm */ +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; }; -struct fcoe_fast_sgl_ctx { - struct regpair sgl_start_addr; - __le32 sgl_byte_offset; - __le16 task_reuse_cnt; - __le16 init_offset_in_first_sge; +/* The fcoe storm task context protection-information of Ystorm */ +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; }; +/* FCP CMD payload */ struct fcoe_fcp_cmd_payload { __le32 opaque[8]; }; +/* FCP RSP payload */ struct fcoe_fcp_rsp_payload { __le32 opaque[6]; }; -struct fcoe_fcp_xfer_payload { - __le32 opaque[3]; -}; - -/* FCoE firmware function init */ -struct fcoe_init_func_ramrod_data { - struct scsi_init_func_params func_params; - struct scsi_init_func_queues q_params; - __le16 mtu; - __le16 sq_num_pages_in_pbl; - __le32 reserved; -}; - -/* FCoE: Mode of the connection: Target or Initiator or both */ -enum fcoe_mode_type { - FCOE_INITIATOR_MODE = 0x0, - FCOE_TARGET_MODE = 0x1, - FCOE_BOTH_OR_NOT_CHOSEN = 0x3, - MAX_FCOE_MODE_TYPE -}; - -struct fcoe_rx_stat { - struct regpair fcoe_rx_byte_cnt; - struct regpair fcoe_rx_data_pkt_cnt; - struct regpair fcoe_rx_xfer_pkt_cnt; - struct regpair fcoe_rx_other_pkt_cnt; - __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; - __le32 fcoe_silent_drop_pkt_rq_full_cnt; - __le32 fcoe_silent_drop_pkt_crc_error_cnt; - __le32 fcoe_silent_drop_pkt_task_invalid_cnt; - __le32 fcoe_silent_drop_total_pkt_cnt; - __le32 rsrv; -}; - -struct fcoe_stat_ramrod_data { - struct regpair stat_params_addr; -}; - -struct protection_info_ctx { - __le16 flags; -#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 -#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 -#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F -#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 - u8 dix_block_size; - u8 dst_size; -}; - -union protection_info_union_ctx { - struct protection_info_ctx info; - __le32 value; -}; - +/* FCP RSP payload */ struct fcp_rsp_payload_padded { struct fcoe_fcp_rsp_payload rsp_payload; __le32 reserved[2]; }; +/* FCP RSP payload */ +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCP RSP payload */ struct fcp_xfer_payload_padded { struct fcoe_fcp_xfer_payload xfer_payload; __le32 reserved[5]; }; +/* Task params */ struct fcoe_tx_data_params { __le32 data_offset; __le32 offset_in_io; u8 flags; -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 -#define 
FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F -#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 u8 dif_residual; __le16 seq_cnt; __le16 single_sge_saved_offset; @@ -227,6 +88,7 @@ struct fcoe_tx_data_params { __le16 reserved3; }; +/* Middle path parameters: FC header fields provided by the driver */ struct fcoe_tx_mid_path_params { __le32 parameter; u8 r_ctl; @@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params { __le16 ox_id; }; +/* Task params */ struct fcoe_tx_params { struct fcoe_tx_data_params data; struct fcoe_tx_mid_path_params mid_path; }; +/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ union fcoe_tx_info_union_ctx { struct fcoe_fcp_cmd_payload fcp_cmd_payload; struct fcp_rsp_payload_padded fcp_rsp_payload; @@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx { struct fcoe_tx_params tx_params; }; +/* Data sgl */ +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +/* Union of DIX SGL \ cached DIX sges */ +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct scsi_sge cached_dix_sge; +}; + +/* The fcoe storm task context of Ystorm */ struct ystorm_fcoe_task_st_ctx { u8 task_type; u8 sgl_mode; -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 u8 cached_dix_sge; u8 expect_first_xfer; __le32 num_pbf_zero_write; @@ -277,44 +157,44 @@ struct ystorm_fcoe_task_ag_ctx { u8 byte1; __le16 word0; u8 flags0; -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -333,68 +213,68 @@ struct tstorm_fcoe_task_ag_ctx { u8 byte1; __le16 icid; u8 flags0; -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 #define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 #define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 -#define 
TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define 
TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
__le32 data_offset_next;
};
+/* Cached data sges */
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from ABTS rsp packet */
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 __le16 seq_cnt; u8 seq_id; u8 ooo_rx_seq_id; @@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { __le16 reserved1; }; +/* FW read only part The fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { u8 task_type; u8 dev_type; @@ -446,6 +348,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { __le32 rsrv; }; +/** The fcoe task storm context of Tstorm */ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; @@ -456,44 +359,44 @@ struct mstorm_fcoe_task_ag_ctx { u8 byte1; __le16 icid; u8 flags0; -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define 
MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; @@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx { __le32 reg2; }; +/* The fcoe task storm context of Mstorm */ struct mstorm_fcoe_task_st_ctx { struct regpair rsp_buf_addr; __le32 rsrv[2]; @@ -515,26 +419,26 @@ struct mstorm_fcoe_task_st_ctx { __le32 data_buffer_offset; __le16 parent_id; __le16 flags; -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 +#define 
MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 struct scsi_cached_sges data_desc; }; @@ -543,51 +447,51 @@ struct ustorm_fcoe_task_ag_ctx { u8 byte1; __le16 icid; u8 flags0; -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 
0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; @@ -596,6 +500,7 @@ struct ustorm_fcoe_task_ag_ctx { __le32 reg5; }; +/* FCoE task context */ struct fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; @@ -611,6 +516,129 @@ struct fcoe_task_context { struct rdif_task_context rdif_context; }; +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 burst_length; + __le32 seq_rec_updated_offset; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +/* FC address (SID/DID) network presentation */ +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw 
s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +/* Data sgl */ +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +/* Per PF FCoE receive path statistics - tStorm RAM structure */ +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +/* FCoe statistics request */ +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +/* Per PF FCoE transmit path statistics - pStorm RAM structure */ struct fcoe_tx_stat { struct regpair fcoe_tx_byte_cnt; struct regpair fcoe_tx_data_pkt_cnt; @@ -618,51 +646,55 @@ struct fcoe_tx_stat { struct regpair fcoe_tx_other_pkt_cnt; }; +/* FCoE SQ/XferQ element */ struct fcoe_wqe { __le16 task_id; __le16 flags; -#define FCOE_WQE_REQ_TYPE_MASK 0xF -#define FCOE_WQE_REQ_TYPE_SHIFT 0 -#define FCOE_WQE_SGL_MODE_MASK 0x1 -#define FCOE_WQE_SGL_MODE_SHIFT 4 -#define FCOE_WQE_CONTINUATION_MASK 0x1 -#define FCOE_WQE_CONTINUATION_SHIFT 5 -#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 -#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 -#define FCOE_WQE_RESERVED_MASK 0x1 -#define FCOE_WQE_RESERVED_SHIFT 7 -#define FCOE_WQE_NUM_SGES_MASK 0xF -#define FCOE_WQE_NUM_SGES_SHIFT 8 -#define FCOE_WQE_RESERVED1_MASK 0xF -#define FCOE_WQE_RESERVED1_SHIFT 12 +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x1 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 5 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 +#define FCOE_WQE_RESERVED_MASK 0x1 +#define FCOE_WQE_RESERVED_SHIFT 7 +#define FCOE_WQE_NUM_SGES_MASK 0xF +#define FCOE_WQE_NUM_SGES_SHIFT 8 +#define FCOE_WQE_RESERVED1_MASK 0xF +#define FCOE_WQE_RESERVED1_SHIFT 12 union fcoe_additional_info_union 
additional_info_union; }; +/* FCoE XFRQ element */ struct xfrqe_prot_flags { u8 flags; -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 -#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 -#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 }; +/* FCoE doorbell data */ struct fcoe_db_data { u8 params; -#define FCOE_DB_DATA_DEST_MASK 0x3 -#define FCOE_DB_DATA_DEST_SHIFT 0 -#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 -#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 -#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 -#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 -#define FCOE_DB_DATA_RESERVED_MASK 0x1 -#define FCOE_DB_DATA_RESERVED_SHIFT 5 -#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; + #endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 85e086cba639..b8f83507f659 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -32,47 +32,48 @@ #ifndef __ISCSI_COMMON__ #define __ISCSI_COMMON__ + /**********************/ /* ISCSI FW CONSTANTS */ /**********************/ /* iSCSI HSI constants */ -#define ISCSI_DEFAULT_MTU (1500) +#define ISCSI_DEFAULT_MTU (1500) /* KWQ (kernel work queue) layer codes */ -#define ISCSI_SLOW_PATH_LAYER_CODE (6) +#define ISCSI_SLOW_PATH_LAYER_CODE (6) /* iSCSI parameter defaults */ -#define ISCSI_DEFAULT_HEADER_DIGEST (0) -#define ISCSI_DEFAULT_DATA_DIGEST (0) -#define ISCSI_DEFAULT_INITIAL_R2T (1) -#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) -#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) -#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) -#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) -#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) /* iSCSI parameter limits */ -#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) -#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) -#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) -#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define 
ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) -#define ISCSI_AHS_CNTL_SIZE 4 +#define ISCSI_AHS_CNTL_SIZE 4 -#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) +#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) -#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 -#define ISCSI_OPTION_2_ON_CHIP_TCP 2 +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 -#define ISCSI_INITIATOR_MODE 0 -#define ISCSI_TARGET_MODE 1 +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ #define ISCSI_OPCODE_NOP_OUT (0) @@ -84,41 +85,42 @@ #define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ -#define ISCSI_OPCODE_NOP_IN (0x20) -#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) -#define ISCSI_OPCODE_TMF_RESPONSE (0x22) -#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) -#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) -#define ISCSI_OPCODE_DATA_IN (0x25) -#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) -#define ISCSI_OPCODE_R2T (0x31) -#define ISCSI_OPCODE_ASYNC_MSG (0x32) -#define ISCSI_OPCODE_REJECT (0x3f) +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) /* iSCSI stages */ -#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) -#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) -#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) /* iSCSI CQE errors */ -#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) -#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +/* ISCSI SGL entry */ struct cqe_error_bitmap { u8 cqe_error_status_bits; -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 -#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 -#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 }; union cqe_error_status { @@ -126,86 
+128,72 @@ union cqe_error_status { struct cqe_error_bitmap error_bits; }; +/* iSCSI Login Response PDU header */ struct data_hdr { __le32 data[12]; }; -struct iscsi_async_msg_hdr { - __le16 reserved0; - u8 flags_attr; -#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F -#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 -#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 - u8 opcode; - __le32 hdr_second_dword; -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 all_ones; - __le32 reserved1; - __le32 stat_sn; - __le32 exp_cmd_sn; - __le32 max_cmd_sn; - __le16 param1_rsrv; - u8 async_vcode; - u8 async_event; - __le16 param3_rsrv; - __le16 param2_rsrv; - __le32 reserved7; +/* Union of data/r2t sequence number */ +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; }; -struct iscsi_cmd_hdr { - __le16 reserved1; - u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 hdr_first_byte; -#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F -#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 -#define ISCSI_CMD_HDR_IMM_MASK 0x1 -#define ISCSI_CMD_HDR_IMM_SHIFT 6 -#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 -#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 - __le32 hdr_second_dword; -#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 itt; - __le32 expected_transfer_length; - __le32 cmd_sn; - __le32 exp_stat_sn; - __le32 cdb[4]; +/* iSCSI DIF flags */ +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 }; +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 exp_r2t_sn; + __le32 buffer_offset; + union iscsi_seq_num seq_num; + struct iscsi_dif_flags dif_flags; + u8 flags; +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 +}; + +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_rxmit_opt { + __le32 fast_rxmit_sge_offset; + __le32 scan_start_buffer_offset; + __le32 fast_rxmit_buffer_offset; + u8 scan_start_sgl_index; + u8 fast_rxmit_sgl_index; + __le16 reserved; +}; + +/* iSCSI Common PDU header */ struct iscsi_common_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 hdr_first_byte; -#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F -#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 -#define ISCSI_COMMON_HDR_IMM_MASK 0x1 -#define ISCSI_COMMON_HDR_IMM_SHIFT 6 -#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 
-#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 __le32 hdr_second_dword; -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun_reserved; __le32 itt; __le32 ttt; @@ -215,86 +203,60 @@ struct iscsi_common_hdr { __le32 data[3]; }; -struct iscsi_conn_offload_params { - struct regpair sq_pbl_addr; - struct regpair r2tq_pbl_addr; - struct regpair xhq_pbl_addr; - struct regpair uhq_pbl_addr; - __le32 initial_ack; - __le16 physical_q0; - __le16 physical_q1; - u8 flags; -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 - u8 pbl_page_size_log; - u8 pbe_page_size_log; - u8 default_cq; - __le32 stat_sn; -}; - -struct iscsi_slow_path_hdr { - u8 op_code; - u8 flags; -#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF -#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 -}; - -struct iscsi_conn_update_ramrod_params { - struct iscsi_slow_path_hdr hdr; - __le16 conn_id; - __le32 fw_cid; - u8 flags; -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 - u8 reserved0[3]; - __le32 max_seq_size; - __le32 max_send_pdu_length; - __le32 max_recv_pdu_length; - __le32 first_seq_length; +/* iSCSI Command PDU header */ +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define 
ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; __le32 exp_stat_sn; + __le32 cdb[4]; }; +/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ struct iscsi_ext_cdb_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; @@ -303,26 +265,27 @@ struct iscsi_ext_cdb_cmd_hdr { struct scsi_sge cdb_sge; }; +/* iSCSI login request PDU header */ struct iscsi_login_req_hdr { u8 version_min; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define 
ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -334,6 +297,7 @@ struct iscsi_login_req_hdr { __le32 reserved2[4]; }; +/* iSCSI logout request PDU header */ struct iscsi_logout_req_hdr { __le16 reserved0; u8 reason_code; @@ -348,13 +312,14 @@ struct iscsi_logout_req_hdr { __le32 reserved4[4]; }; +/* iSCSI Data-out PDU header */ struct iscsi_data_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -368,22 +333,23 @@ struct iscsi_data_out_hdr { __le32 reserved5; }; +/* iSCSI Data-in PDU header */ struct iscsi_data_in_hdr { u8 status_rsvd; u8 reserved1; u8 flags; -#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 -#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 -#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 -#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 -#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 -#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 -#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -397,6 +363,7 @@ struct iscsi_data_in_hdr { __le32 residual_count; }; +/* iSCSI R2T PDU header */ struct iscsi_r2t_hdr { u8 reserved0[3]; u8 opcode; @@ -412,13 +379,14 @@ struct iscsi_r2t_hdr { __le32 desired_data_trns_len; }; +/* iSCSI NOP-out PDU header */ struct iscsi_nop_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -432,19 +400,20 @@ struct iscsi_nop_out_hdr { __le32 reserved6; }; +/* iSCSI NOP-in PDU header */ struct iscsi_nop_in_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define 
ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -456,26 +425,27 @@ struct iscsi_nop_in_hdr { __le32 reserved7; }; +/* iSCSI Login Response PDU header */ struct iscsi_login_response_hdr { u8 version_active; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -490,16 +460,17 @@ struct iscsi_login_response_hdr { __le32 reserved4[2]; }; +/* iSCSI Logout Response PDU header */ struct iscsi_logout_response_hdr { u8 reserved1; u8 response; u8 flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 reserved2[2]; __le32 itt; __le32 reserved3; @@ -512,21 +483,22 @@ struct iscsi_logout_response_hdr { __le32 reserved5[1]; }; +/* iSCSI Text Request PDU header */ struct iscsi_text_request_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 -#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -535,21 +507,22 @@ struct iscsi_text_request_hdr { __le32 reserved4[4]; }; +/* iSCSI Text Response PDU header */ struct iscsi_text_response_hdr { __le16 reserved1; u8 flags; -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -559,15 +532,16 @@ struct iscsi_text_response_hdr { __le32 reserved4[3]; }; +/* iSCSI TMF Request PDU header */ struct iscsi_tmf_request_hdr { __le16 reserved0; u8 function; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 rtt; @@ -584,10 +558,10 @@ struct iscsi_tmf_response_hdr { u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; __le32 reserved1; @@ -597,16 +571,17 @@ struct iscsi_tmf_response_hdr { __le32 reserved4[3]; }; +/* iSCSI Response PDU header */ struct iscsi_response_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 snack_tag; @@ -618,16 +593,17 @@ struct iscsi_response_hdr { 
__le32 residual_count; }; +/* iSCSI Reject PDU header */ struct iscsi_reject_hdr { u8 reserved4; u8 hdr_reason; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 all_ones; __le32 reserved2; @@ -638,6 +614,35 @@ struct iscsi_reject_hdr { __le32 reserved3[2]; }; +/* iSCSI Asynchronous Message PDU header */ +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +/* PDU header part of Ystorm task context */ union iscsi_task_hdr { struct iscsi_common_hdr common; struct data_hdr data; @@ -661,6 +666,329 @@ union iscsi_task_hdr { struct iscsi_async_msg_hdr async_msg; }; +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + struct ystorm_iscsi_task_rxmit_opt rxmit_opt; + union iscsi_task_hdr pdu_hdr; +}; + +struct ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define 
YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define 
USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +/* The iscsi storm task context of Mstorm */ +struct mstorm_iscsi_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct iscsi_dif_flags dif_flags; + u8 reserved0[2]; + struct regpair sense_db; + __le32 expected_itt; + __le32 reserved1; +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_SGES_MASK 0xF +#define ISCSI_REG1_NUM_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 4 +}; + +/* The iscsi storm task context of Ustorm */ +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct iscsi_dif_flags dif_flags; + __le16 reserved3; + __le32 reserved4; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define 
USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +/* iscsi task context */ +struct iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +/* iSCSI connection offload params passed by driver to FW in ISCSI offload + * ramrod. + */ +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +/* spe message header */ +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +/* iSCSI connection update params passed by driver to FW in ISCSI update + *ramrod. 
+ */ +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; +}; + +/* iSCSI CQ element */ struct iscsi_cqe_common { __le16 conn_id; u8 cqe_type; @@ -669,6 +997,7 @@ struct iscsi_cqe_common { union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_solicited { __le16 conn_id; u8 cqe_type; @@ -682,6 +1011,7 @@ struct iscsi_cqe_solicited { union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_unsolicited { __le16 conn_id; u8 cqe_type; @@ -693,12 +1023,14 @@ struct iscsi_cqe_unsolicited { union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ union iscsi_cqe { struct iscsi_cqe_common cqe_common; struct iscsi_cqe_solicited cqe_solicited; struct iscsi_cqe_unsolicited cqe_unsolicited; }; +/* iSCSI CQE type */ enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, @@ -708,6 +1040,7 @@ enum iscsi_cqes_type { MAX_ISCSI_CQES_TYPE }; +/* iSCSI CQE type */ enum iscsi_cqe_unsolicited_type { ISCSI_CQE_UNSOLICITED_NONE, ISCSI_CQE_UNSOLICITED_SINGLE, @@ -717,37 +1050,28 @@ enum iscsi_cqe_unsolicited_type { MAX_ISCSI_CQE_UNSOLICITED_TYPE }; - +/* iscsi debug modes */ struct iscsi_debug_modes { u8 flags; -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 -}; - -struct iscsi_dif_flags { - u8 flags; -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 -}; - +#define 
ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 +}; + +/* iSCSI kernel completion queue IDs */ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_INIT_FUNC = 0, ISCSI_EVENT_TYPE_DESTROY_FUNC, @@ -772,6 +1096,7 @@ enum iscsi_eqe_opcode { MAX_ISCSI_EQE_OPCODE }; +/* iSCSI EQE and CQE completion status */ enum iscsi_error_types { ISCSI_STATUS_NONE = 0, ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, @@ -823,7 +1148,7 @@ enum iscsi_error_types { MAX_ISCSI_ERROR_TYPES }; - +/* iSCSI Ramrod Command IDs */ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, @@ -836,19 +1161,7 @@ enum iscsi_ramrod_cmd_id { MAX_ISCSI_RAMROD_CMD_ID }; -struct iscsi_reg1 { - __le32 reg1_map; -#define ISCSI_REG1_NUM_SGES_MASK 0xF -#define ISCSI_REG1_NUM_SGES_SHIFT 0 -#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF -#define ISCSI_REG1_RESERVED1_SHIFT 4 -}; - -union iscsi_seq_num { - __le16 data_sn; - __le16 r2t_sn; -}; - +/* iSCSI connection termination request */ struct iscsi_spe_conn_mac_update { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -859,6 +1172,9 @@ struct iscsi_spe_conn_mac_update { u8 reserved0[2]; }; +/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ struct iscsi_spe_conn_offload { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -867,6 +1183,9 @@ struct iscsi_spe_conn_offload { struct tcp_offload_params tcp; }; +/* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in + * iSCSI offload ramrod. 
+ */ struct iscsi_spe_conn_offload_option2 { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -875,6 +1194,7 @@ struct iscsi_spe_conn_offload_option2 { struct tcp_offload_params_opt2 tcp; }; +/* iSCSI connection termination request */ struct iscsi_spe_conn_termination { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -885,12 +1205,14 @@ struct iscsi_spe_conn_termination { struct regpair query_params_addr; }; +/* iSCSI firmware function destroy parameters */ struct iscsi_spe_func_dstry { struct iscsi_slow_path_hdr hdr; __le16 reserved0; __le32 reserved1; }; +/* iSCSI firmware function init parameters */ struct iscsi_spe_func_init { struct iscsi_slow_path_hdr hdr; __le16 half_way_close_timeout; @@ -908,273 +1230,7 @@ struct iscsi_spe_func_init { struct scsi_init_func_queues q_params; }; -struct ystorm_iscsi_task_state { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 exp_r2t_sn; - __le32 buffer_offset; - union iscsi_seq_num seq_num; - struct iscsi_dif_flags dif_flags; - u8 flags; -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 -}; - -struct ystorm_iscsi_task_rxmit_opt { - __le32 fast_rxmit_sge_offset; - __le32 scan_start_buffer_offset; - __le32 fast_rxmit_buffer_offset; - u8 scan_start_sgl_index; - u8 fast_rxmit_sgl_index; - __le16 reserved; -}; - -struct ystorm_iscsi_task_st_ctx { - struct ystorm_iscsi_task_state state; - struct ystorm_iscsi_task_rxmit_opt rxmit_opt; - union iscsi_task_hdr pdu_hdr; -}; - -struct ystorm_iscsi_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 word0; - u8 flags0; -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define 
YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 TTT; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct mstorm_iscsi_task_ag_ctx { - u8 cdu_validation; - u8 byte1; - __le16 task_cid; - u8 flags0; -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 - u8 flags1; -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 reg0; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct ustorm_iscsi_task_ag_ctx { - u8 reserved; - u8 state; - __le16 icid; - u8 flags0; -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 - u8 flags1; -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define 
USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 - u8 flags3; -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 rcv_cont_len; - __le32 exp_cont_len; - __le32 total_data_acked; - __le32 exp_data_acked; - u8 next_tid_valid; - u8 byte3; - __le16 word1; - __le16 next_tid; - __le16 word3; - __le32 hdr_residual_count; - __le32 exp_r2t_sn; -}; - -struct mstorm_iscsi_task_st_ctx { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 rem_task_size; - __le32 data_buffer_offset; - u8 task_type; - struct iscsi_dif_flags dif_flags; - u8 reserved0[2]; - struct regpair sense_db; - __le32 expected_itt; - __le32 reserved1; -}; - -struct ustorm_iscsi_task_st_ctx { - __le32 rem_rcv_len; - __le32 exp_data_transfer_len; - __le32 exp_data_sn; - struct regpair lun; - struct iscsi_reg1 reg1; - u8 flags2; -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 - struct iscsi_dif_flags dif_flags; - __le16 reserved3; - __le32 reserved4; - __le32 reserved5; - __le32 reserved6; - __le32 reserved7; - u8 task_type; - u8 error_flags; -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 - u8 flags; -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 - u8 cq_rss_number; -}; - 
-struct iscsi_task_context { - struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; - struct regpair ystorm_ag_padding[2]; - struct tdif_task_context tdif_context; - struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; - struct regpair mstorm_ag_padding[2]; - struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; - struct mstorm_iscsi_task_st_ctx mstorm_st_context; - struct ustorm_iscsi_task_st_ctx ustorm_st_context; - struct rdif_task_context rdif_context; -}; - +/* iSCSI task type */ enum iscsi_task_type { ISCSI_TASK_TYPE_INITIATOR_WRITE, ISCSI_TASK_TYPE_INITIATOR_READ, @@ -1189,50 +1245,53 @@ enum iscsi_task_type { MAX_ISCSI_TASK_TYPE }; +/* iSCSI DesiredDataTransferLength/ttt union */ union iscsi_ttt_txlen_union { __le32 desired_tx_len; __le32 ttt; }; +/* iSCSI uHQ element */ struct iscsi_uhqe { __le32 reg1; -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 -#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 -#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 -#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 -#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 -#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 -#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 -#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 __le32 reg2; -#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF -#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 -#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 }; - +/* iSCSI WQ element */ struct iscsi_wqe { __le16 task_id; u8 flags; -#define ISCSI_WQE_WQE_TYPE_MASK 0x7 -#define ISCSI_WQE_WQE_TYPE_SHIFT 0 -#define ISCSI_WQE_NUM_SGES_MASK 0xF -#define ISCSI_WQE_NUM_SGES_SHIFT 3 -#define ISCSI_WQE_RESPONSE_MASK 0x1 -#define ISCSI_WQE_RESPONSE_SHIFT 7 +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_SGES_MASK 0xF +#define ISCSI_WQE_NUM_SGES_SHIFT 3 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; __le32 contlen_cdbsize; -#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF -#define ISCSI_WQE_CONT_LEN_SHIFT 0 -#define ISCSI_WQE_CDB_SIZE_MASK 0xFF -#define ISCSI_WQE_CDB_SIZE_SHIFT 24 +#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_CDB_SIZE_SHIFT 24 }; +/* iSCSI wqe type */ enum iscsi_wqe_type { ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_TASK_CLEANUP, @@ -1244,6 +1303,7 @@ enum iscsi_wqe_type { MAX_ISCSI_WQE_TYPE }; +/* iSCSI xHQ element */ struct iscsi_xhqe { union iscsi_ttt_txlen_union ttt_or_txlen; __le32 exp_stat_sn; @@ -1251,27 +1311,30 @@ struct iscsi_xhqe { u8 total_ahs_length; u8 opcode; u8 flags; -#define ISCSI_XHQE_FINAL_MASK 0x1 -#define ISCSI_XHQE_FINAL_SHIFT 
0 -#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 -#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 -#define ISCSI_XHQE_NUM_SGES_MASK 0xF -#define ISCSI_XHQE_NUM_SGES_SHIFT 2 -#define ISCSI_XHQE_RESERVED0_MASK 0x3 -#define ISCSI_XHQE_RESERVED0_SHIFT 6 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 0 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 +#define ISCSI_XHQE_NUM_SGES_MASK 0xF +#define ISCSI_XHQE_NUM_SGES_SHIFT 2 +#define ISCSI_XHQE_RESERVED0_MASK 0x3 +#define ISCSI_XHQE_RESERVED0_SHIFT 6 union iscsi_seq_num seq_num; __le16 reserved1; }; +/* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; }; +/* Per PF iSCSI transmit path statistics - pStorm RAM structure */ struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; }; +/* Per PF iSCSI receive path statistics - tStorm RAM structure */ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; @@ -1281,17 +1344,20 @@ struct tstorm_iscsi_stats_drv { __le32 iscsi_immq_threshold_cnt; }; +/* Per PF iSCSI receive path statistics - uStorm RAM structure */ struct ustorm_iscsi_stats_drv { struct regpair iscsi_rx_data_pdu_cnt; struct regpair iscsi_rx_r2t_pdu_cnt; struct regpair iscsi_rx_total_pdu_cnt; }; +/* Per PF iSCSI transmit path statistics - xStorm RAM structure */ struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; }; +/* Per PF iSCSI transmit path statistics - yStorm RAM structure */ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; @@ -1303,68 +1369,68 @@ struct tstorm_iscsi_task_ag_ctx { u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define 
TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define 
TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -1376,18 +1442,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; + +/* iSCSI doorbell data */ struct iscsi_db_data { u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index b8b3e1cfae90..c6cfd39cd910 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -29,9 +29,12 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + #ifndef __IWARP_COMMON__ #define __IWARP_COMMON__ + #include + /************************/ /* IWARP FW CONSTANTS */ /************************/ @@ -40,14 +43,14 @@ #define IWARP_PASSIVE_MODE 1 #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) -#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) -#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) +#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) +#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) -#define IWARP_MAX_QPS (64 * 1024) +#define IWARP_MAX_QPS (64 * 1024) #endif /* __IWARP_COMMON__ */ diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index cc646ca97974..0301499b59ae 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -316,16 +316,16 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block *sb_virt; - dma_addr_t sb_phys; - u32 sb_ack; /* Last given ack */ - u16 igu_sb_id; - void __iomem *igu_addr; - u8 flags; -#define QED_SB_INFO_INIT 0x1 -#define QED_SB_INFO_SETUP 0x2 - - struct qed_dev *cdev; + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 + + struct qed_dev *cdev; }; enum qed_dev_type { diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index a9b3050f469c..c1a446ebe362 100644 --- 
a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -32,28 +32,29 @@ #ifndef __RDMA_COMMON__ #define __RDMA_COMMON__ + /************************/ /* RDMA FW CONSTANTS */ /************************/ -#define RDMA_RESERVED_LKEY (0) -#define RDMA_RING_PAGE_SIZE (0x1000) +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) -#define RDMA_MAX_SGE_PER_SQ_WQE (4) -#define RDMA_MAX_SGE_PER_RQ_WQE (4) +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) -#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) -#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) -#define RDMA_MAX_CQS (64 * 1024) -#define RDMA_MAX_TIDS (128 * 1024 - 1) -#define RDMA_MAX_PDS (64 * 1024) +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) -#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 -#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index fe6a33e45977..e15e0da71240 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -33,13 +33,18 @@ #ifndef __ROCE_COMMON__ #define __ROCE_COMMON__ -#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) -#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +/************************/ +/* ROCE FW CONSTANTS */ +/************************/ -#define ROCE_MAX_QPS (32 * 1024) -#define ROCE_DCQCN_NP_MAX_QPS (64) -#define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +#define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) + +/* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, ROCE_ASYNC_EVENT_COMM_EST = 1, diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 08df82a096b6..f8c7b408e842 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -33,43 +33,53 @@ #ifndef __STORAGE_COMMON__ #define __STORAGE_COMMON__ -#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) -#define BDQ_NUM_RESOURCES (4) +/*********************/ +/* SCSI CONSTANTS */ +/*********************/ -#define BDQ_ID_RQ (0) -#define BDQ_ID_IMM_DATA (1) -#define BDQ_NUM_IDS (2) +#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) -#define SCSI_NUM_SGES_SLOW_SGL_THR 8 +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_NUM_IDS (2) -#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) +#define SCSI_NUM_SGES_SLOW_SGL_THR 8 +#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) + +/* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; struct regpair opaque; }; +/* Scsi Drv BDQ struct */ struct scsi_bdq_ram_drv_data { __le16 external_producer; __le16 reserved0[3]; }; +/* SCSI SGE entry */ struct scsi_sge { struct regpair sge_addr; __le32 sge_len; __le32 reserved; }; +/* Cached SGEs section */ struct scsi_cached_sges { struct scsi_sge sge[4]; }; +/* Scsi Drv CMDQ struct */ struct scsi_drv_cmdq { __le16 cmdq_cons; __le16 reserved0; __le32 reserved1; }; 
+/* Common SCSI init params passed by driver to FW in function init ramrod */ struct scsi_init_func_params { __le16 num_tasks; u8 log_page_size; @@ -77,6 +87,7 @@ struct scsi_init_func_params { u8 reserved2[12]; }; +/* SCSI RQ/CQ/CMDQ firmware function init parameters */ struct scsi_init_func_queues { struct regpair glbl_q_params_addr; __le16 rq_buffer_size; @@ -84,14 +95,14 @@ struct scsi_init_func_queues { __le16 cmdq_num_entries; u8 bdq_resource_id; u8 q_validity; -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F +#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; @@ -107,16 +118,19 @@ struct scsi_init_func_queues { __le32 reserved1; }; +/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; +/* SCSI SGL types */ enum scsi_sgl_mode { SCSI_TX_SLOW_SGL, SCSI_FAST_SGL, MAX_SCSI_SGL_MODE }; +/* SCSI SGL parameters */ struct scsi_sgl_params { struct regpair sgl_addr; __le32 sgl_total_length; @@ -126,6 +140,7 @@ struct scsi_sgl_params { u8 reserved; }; +/* SCSI terminate connection params */ struct scsi_terminate_extra_params { __le16 unsolicited_cq_count; __le16 cmdq_count; diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dbf7a43c3e1f..65b95fd25101 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -33,8 +33,13 @@ #ifndef __TCP_COMMON__ #define __TCP_COMMON__ -#define TCP_INVALID_TIMEOUT_VAL -1 +/********************/ +/* TCP FW CONSTANTS */ +/********************/ +#define TCP_INVALID_TIMEOUT_VAL -1 + +/* OOO opaque data received from LL2 */ struct ooo_opaque { __le32 cid; u8 drop_isle; @@ -43,25 +48,29 @@ struct ooo_opaque { u8 ooo_isle; }; +/* tcp connect mode enum */ enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, MAX_TCP_CONNECT_MODE }; +/* tcp function init parameters */ struct tcp_init_params { __le32 two_msl_timer; __le16 tx_sws_timer; - u8 maxfinrt; + u8 max_fin_rt; u8 reserved[9]; }; +/* tcp IPv4/IPv6 enum */ enum tcp_ip_version { TCP_IPV4, TCP_IPV6, MAX_TCP_IP_VERSION }; +/* tcp offload parameters */ struct tcp_offload_params { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -71,22 +80,22 @@ struct tcp_offload_params { __le16 remote_mac_addr_hi; __le16 vlan_id; u8 flags; -#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 
4 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 -#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 u8 ip_version; __le32 remote_ip[4]; __le32 local_ip[4]; @@ -132,6 +141,7 @@ struct tcp_offload_params { __le32 reserved3[2]; }; +/* tcp offload parameters */ struct tcp_offload_params_opt2 { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -141,14 +151,14 @@ struct tcp_offload_params_opt2 { __le16 remote_mac_addr_hi; __le16 vlan_id; u8 flags; -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 u8 ip_version; __le32 remote_ip[4]; __le32 local_ip[4]; @@ -166,6 +176,7 @@ struct tcp_offload_params_opt2 { __le32 reserved1[22]; }; +/* tcp IPv4/IPv6 enum */ enum tcp_seg_placement_event { TCP_EVENT_ADD_PEN, TCP_EVENT_ADD_NEW_ISLE, @@ -177,40 +188,41 @@ enum tcp_seg_placement_event { MAX_TCP_SEG_PLACEMENT_EVENT }; +/* tcp init parameters */ struct tcp_update_params { __le16 flags; -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 
0x1 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 -#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 -#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 -#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; @@ -226,6 +238,7 @@ struct tcp_update_params { u8 reserved1[7]; }; +/* toe upload parameters */ struct tcp_upload_params { __le32 rcv_next; __le32 snd_una; -- cgit v1.2.3 From 21dd79e82f00b29eba665ed0c33fd5f2214e7f99 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Wed, 27 Dec 2017 19:30:06 +0200 Subject: qed*: HSI renaming for different types of HW This patch renames defines and structures in the FW HSI files to allow a distinction between different types of HW. Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: Chad Dupuis Signed-off-by: Manish Rangankar Signed-off-by: Tomer Tayar Signed-off-by: David S. 
Miller --- include/linux/qed/common_hsi.h | 30 ++-- include/linux/qed/fcoe_common.h | 362 +++++++++++++++++++-------------------- include/linux/qed/iscsi_common.h | 360 +++++++++++++++++++------------------- include/linux/qed/qed_if.h | 4 +- 4 files changed, 378 insertions(+), 378 deletions(-) (limited to 'include/linux') diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 4874c104144b..5aa9513189df 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -156,11 +156,11 @@ #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) -#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_NUM_VOQS_E4 (MAX_NUM_VOQS_K2) #define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) /* CIDs */ -#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_CONNECTION_TYPES_E4 (8) #define NUM_OF_LCIDS (320) #define NUM_OF_LTIDS (320) @@ -401,7 +401,7 @@ #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ -#define PIS_PER_SB 12 +#define PIS_PER_SB_E4 12 #define CAU_HC_STOPPED_STATE 3 #define CAU_HC_DISABLE_STATE 4 @@ -1202,20 +1202,20 @@ struct rdif_task_context { }; /* Status block structure */ -struct status_block { - __le16 pi_array[PIS_PER_SB]; +struct status_block_e4 { + __le16 pi_array[PIS_PER_SB_E4]; __le32 sb_num; -#define STATUS_BLOCK_SB_NUM_MASK 0x1FF -#define STATUS_BLOCK_SB_NUM_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F -#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 -#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF -#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 +#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 __le32 prod_index; -#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF -#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF -#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 }; /* Tdif context */ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index e720a7b37b0b..52a1e8d01508 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -152,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx { u8 reserved2[8]; }; -struct ystorm_fcoe_task_ag_ctx { +struct e4_ystorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define 
E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -208,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx { __le32 reg2; }; -struct tstorm_fcoe_task_ag_ctx { +struct e4_tstorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 
0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 cleanup_state; __le16 last_sent_tid; __le32 rec_rr_tov_exp_timeout; @@ -354,49 +354,49 @@ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; }; -struct mstorm_fcoe_task_ag_ctx { +struct e4_mstorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 icid; u8 flags0; -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define 
E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; @@ -442,56 +442,56 @@ struct mstorm_fcoe_task_st_ctx { struct scsi_cached_sges data_desc; }; -struct ustorm_fcoe_task_ag_ctx { +struct e4_ustorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF 
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define 
E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; @@ -501,18 +501,18 @@ struct ustorm_fcoe_task_ag_ctx { }; /* FCoE task context */ -struct fcoe_task_context { +struct e4_fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; - struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; - struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct tstorm_fcoe_task_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; struct mstorm_fcoe_task_st_ctx mstorm_st_context; - struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; struct rdif_task_context rdif_context; }; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index b8f83507f659..331b9c3e5148 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -673,49 +673,49 @@ struct ystorm_iscsi_task_st_ctx { union iscsi_task_hdr pdu_hdr; }; -struct ystorm_iscsi_task_ag_ctx { +struct e4_ystorm_iscsi_task_ag_ctx { u8 reserved; u8 byte1; __le16 word0; u8 flags0; -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define 
E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 TTT; u8 byte3; @@ -723,49 +723,49 @@ struct ystorm_iscsi_task_ag_ctx { __le16 word1; }; -struct mstorm_iscsi_task_ag_ctx { +struct e4_mstorm_iscsi_task_ag_ctx { u8 cdu_validation; u8 byte1; __le16 task_cid; u8 flags0; -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 u8 flags1; -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define 
MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -773,56 +773,56 @@ struct mstorm_iscsi_task_ag_ctx { __le16 word1; }; -struct ustorm_iscsi_task_ag_ctx { +struct e4_ustorm_iscsi_task_ag_ctx { u8 reserved; u8 state; __le16 icid; u8 flags0; -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 
u8 flags1; -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 u8 flags3; -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 
0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 rcv_cont_len; @@ -907,14 +907,14 @@ struct ustorm_iscsi_task_st_ctx { }; /* iscsi task context */ -struct iscsi_task_context { +struct e4_iscsi_task_context { struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; struct regpair ystorm_ag_padding[2]; struct tdif_task_context tdif_context; - struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; struct regpair mstorm_ag_padding[2]; - struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; struct mstorm_iscsi_task_st_ctx mstorm_st_context; struct ustorm_iscsi_task_st_ctx ustorm_st_context; struct rdif_task_context rdif_context; @@ -1364,73 +1364,73 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_total_pdu_cnt; }; -struct tstorm_iscsi_task_ag_ctx { +struct e4_tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define 
TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define 
E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 0301499b59ae..310442f4b46d 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -316,7 +316,7 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; u32 sb_ack; /* Last given ack */ u16 igu_sb_id; @@ -939,7 +939,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & - STATUS_BLOCK_PROD_INDEX_MASK; + STATUS_BLOCK_E4_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; -- cgit v1.2.3 From da09091732aecc2d9a068c5bd8e9fd925cc430f2 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Wed, 27 Dec 2017 19:30:07 +0200 Subject: qed*: Utilize FW 8.33.1.0 Advance the qed* drivers to use firmware 8.33.1.0: Modify core driver (qed) to utilize the new FW and initialize the device with it. This is the lion's share of the patch, and includes changes to FW interface files, device initialization flows, FW interaction flows, and debug collection flows. Modify Ethernet driver (qede) to make use of new FW in fastpath. Modify RoCE/iWARP driver (qedr) to make use of new FW in fastpath. Modify FCoE driver (qedf) to make use of new FW in fastpath. Modify iSCSI driver (qedi) to make use of new FW in fastpath. Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: Yuval Bason Signed-off-by: Ram Amrani Signed-off-by: Manish Chopra Signed-off-by: Chad Dupuis Signed-off-by: Manish Rangankar Signed-off-by: Tomer Tayar Signed-off-by: David S. Miller --- include/linux/qed/common_hsi.h | 79 ++++++++++++------ include/linux/qed/eth_common.h | 33 ++++++-- include/linux/qed/fcoe_common.h | 52 +++++++++++- include/linux/qed/iscsi_common.h | 165 ++++++++++++++++++++++++++++++------- include/linux/qed/qed_eth_if.h | 38 +++++++-- include/linux/qed/qed_if.h | 16 ++-- include/linux/qed/qed_iscsi_if.h | 2 - include/linux/qed/qed_ll2_if.h | 2 +- include/linux/qed/storage_common.h | 50 +++++++++-- include/linux/qed/tcp_common.h | 52 +++++++----- 10 files changed, 379 insertions(+), 110 deletions(-) (limited to 'include/linux') diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 5aa9513189df..2b3b350e07b7 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -109,8 +109,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 20 -#define FW_REVISION_VERSION 0 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 1 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -148,17 +148,10 @@ /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ #define NUM_PHYS_TCS_4PORT_K2 (4) #define NUM_OF_PHYS_TCS (8) - +#define PURE_LB_TC NUM_OF_PHYS_TCS #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) -#define LB_TC (NUM_OF_PHYS_TCS) - -#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) -#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) -#define MAX_NUM_VOQS_E4 (MAX_NUM_VOQS_K2) -#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) - /* CIDs */ #define NUM_OF_CONNECTION_TYPES_E4 (8) #define NUM_OF_LCIDS (320) @@ -602,6 +595,11 @@ #define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 #define PXP_VF_BAR0_END_SDM_ZONE_A 
0x10000 +#define PXP_VF_BAR0_START_IGU2 0x10000 +#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 +#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ + PXP_VF_BAR0_IGU2_LENGTH - 1) + #define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 @@ -662,14 +660,6 @@ #define PRS_GFT_CAM_LINES_NO_MATCH 31 -/* Async data KCQ CQE */ -struct async_data { - __le32 cid; - __le16 itid; - u8 error_code; - u8 fw_debug_param; -}; - /* Interrupt coalescing TimeSet */ struct coalescing_timeset { u8 value; @@ -690,9 +680,26 @@ struct eth_rx_prod_data { __le16 cqe_prod; }; +struct tcp_ulp_connect_done_params { + __le16 mss; + u8 snd_wnd_scale; + u8 flags; +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 +}; + +struct iscsi_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; +}; + struct iscsi_eqe_data { - __le32 cid; + __le16 icid; __le16 conn_id; + __le16 reserved; u8 error_code; u8 error_pdu_opcode_reserved; #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F @@ -756,7 +763,7 @@ struct ustorm_queue_zone { /* Status block structure */ struct cau_pi_entry { - u32 prod; + __le32 prod; #define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF #define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 #define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F @@ -769,14 +776,14 @@ struct cau_pi_entry { /* Status block structure */ struct cau_sb_entry { - u32 data; + __le32 data; #define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF #define CAU_SB_ENTRY_SB_PROD_SHIFT 0 #define CAU_SB_ENTRY_STATE0_MASK 0xF #define CAU_SB_ENTRY_STATE0_SHIFT 24 #define CAU_SB_ENTRY_STATE1_MASK 0xF #define CAU_SB_ENTRY_STATE1_SHIFT 28 - u32 params; + __le32 params; #define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F #define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 #define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F @@ -954,7 +961,7 @@ enum igu_int_cmd { /* IGU producer or consumer update command */ struct igu_prod_cons_update { - u32 sb_id_and_flags; + __le32 sb_id_and_flags; #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF #define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 @@ -969,7 +976,7 @@ struct igu_prod_cons_update { #define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 - u32 reserved1; + __le32 reserved1; }; /* Igu segments access for default status block only */ @@ -979,6 +986,30 @@ enum igu_seg_access { MAX_IGU_SEG_ACCESS }; +/* Enumeration for L3 type field of parsing_and_err_flags. + * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 + * (This field can be filled according to the last-ethertype) + */ +enum l3_type { + e_l3_type_unknown, + e_l3_type_ipv4, + e_l3_type_ipv6, + MAX_L3_TYPE +}; + +/* Enumeration for l4Protocol field of parsing_and_err_flags. + * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. + * If the packet is IPv4 fragment, and its not the first fragment, the + * protocol-type should be set to none. 
+ */ +enum l4_protocol { + e_l4_protocol_none, + e_l4_protocol_tcp, + e_l4_protocol_udp, + MAX_L4_PROTOCOL +}; + +/* Parsing and error flags field */ struct parsing_and_err_flags { __le16 flags; #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index f554f4b58dfe..9db02856623b 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -104,6 +104,27 @@ /* Control frame check constants */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASH_CAN_VPORT 0x1FF + +/* Destination port mode */ +enum dest_port_mode { + DEST_PORT_PHY, + DEST_PORT_LOOPBACK, + DEST_PORT_PHY_LOOPBACK, + DEST_PORT_DROP, + MAX_DEST_PORT_MODE +}; + +/* Ethernet address type */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + struct eth_tx_1st_bd_flags { u8 bitfields; #define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 @@ -176,10 +197,6 @@ struct eth_edpm_fw_data { __le32 reserved; }; -struct eth_fast_path_cqe_fw_debug { - __le16 reserved2; -}; - /* Tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; @@ -226,9 +243,9 @@ struct eth_fast_path_rx_reg_cqe { u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; - u8 reserved[9]; - struct eth_fast_path_cqe_fw_debug fw_debug; - u8 reserved1[3]; + u8 reserved; + __le16 flow_id; + u8 reserved1[11]; struct eth_pmd_flow_flags pmd_flags; }; @@ -280,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe { u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; - struct eth_fast_path_cqe_fw_debug fw_debug; + __le16 flow_id; u8 reserved; struct eth_pmd_flow_flags pmd_flags; }; diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 52a1e8d01508..22077c586853 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -580,10 +580,12 @@ struct fcoe_conn_offload_ramrod_data { #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 __le16 conn_id; u8 def_q_idx; u8 reserved[5]; @@ -594,6 +596,13 @@ struct fcoe_conn_terminate_ramrod_data { struct regpair terminate_params_addr; }; +/* FCoE device type */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + /* Data sgl */ struct fcoe_fast_sgl_ctx { struct regpair sgl_start_addr; @@ -608,7 +617,7 @@ struct fcoe_init_func_ramrod_data { struct scsi_init_func_queues q_params; __le16 mtu; __le16 sq_num_pages_in_pbl; - __le32 reserved; + __le32 reserved[3]; }; /* FCoE: Mode of the connection: Target or Initiator or both */ @@ -633,11 +642,46 @@ struct fcoe_rx_stat { __le32 rsrv; }; +/* FCoE SQE request type */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + 
SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + /* FCoe statistics request */ struct fcoe_stat_ramrod_data { struct regpair stat_params_addr; }; +/* FCoE task type */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + /* Per PF FCoE transmit path statistics - pStorm RAM structure */ struct fcoe_tx_stat { struct regpair fcoe_tx_byte_cnt; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 331b9c3e5148..4cc9b37b8d95 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -106,6 +106,12 @@ #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) #define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +/* Union of data bd_opaque/ tq_tid */ +union bd_opaque_tq_union { + __le16 bd_opaque; + __le16 tq_tid; +}; + /* ISCSI SGL entry */ struct cqe_error_bitmap { u8 cqe_error_status_bits; @@ -133,6 +139,65 @@ struct data_hdr { __le32 data[12]; }; +struct lun_mapper_addr_reserved { + struct regpair lun_mapper_addr; + u8 reserved0[8]; +}; + +/* rdif conetxt for dif on immediate */ +struct dif_on_immediate_params { + __le32 initial_ref_tag; + __le16 application_tag; + __le16 application_tag_mask; + __le16 flags1; +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + u8 flags0; +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 +#define 
DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 + u8 reserved_zero[5]; +}; + +/* iSCSI dif on immediate mode attributes union */ +union dif_configuration_params { + struct lun_mapper_addr_reserved lun_mapper_address; + struct dif_on_immediate_params def_dif_conf; +}; + /* Union of data/r2t sequence number */ union iscsi_seq_num { __le16 data_sn; @@ -163,8 +228,10 @@ struct ystorm_iscsi_task_state { #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 }; /* The iscsi storm task context of Ystorm */ @@ -846,7 +913,7 @@ struct mstorm_iscsi_task_st_ctx { __le32 data_buffer_offset; u8 task_type; struct iscsi_dif_flags dif_flags; - u8 reserved0[2]; + __le16 dif_task_icid; struct regpair sense_db; __le32 expected_itt; __le32 reserved1; @@ -860,6 +927,10 @@ struct iscsi_reg1 { #define ISCSI_REG1_RESERVED1_SHIFT 4 }; +struct tqe_opaque { + __le16 opaque[2]; +}; + /* The iscsi storm task context of Ustorm */ struct ustorm_iscsi_task_st_ctx { __le32 rem_rcv_len; @@ -874,7 +945,7 @@ struct ustorm_iscsi_task_st_ctx { #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 struct iscsi_dif_flags dif_flags; __le16 reserved3; - __le32 reserved4; + struct tqe_opaque tqe_opaque_list; __le32 reserved5; __le32 reserved6; __le32 reserved7; @@ -946,6 +1017,18 @@ struct iscsi_conn_offload_params { __le32 stat_sn; }; +/* iSCSI connection statistics */ +struct iscsi_conn_stats_params { + struct regpair iscsi_tcp_tx_packets_cnt; + struct regpair iscsi_tcp_tx_bytes_cnt; + struct regpair iscsi_tcp_tx_rxmit_cnt; + struct regpair iscsi_tcp_rx_packets_cnt; + struct regpair iscsi_tcp_rx_bytes_cnt; + struct regpair iscsi_tcp_rx_dup_ack_cnt; + __le32 iscsi_tcp_rx_chksum_err_cnt; + __le32 reserved; +}; + /* spe message header */ struct iscsi_slow_path_hdr { u8 op_code; @@ -978,14 +1061,17 @@ struct iscsi_conn_update_ramrod_params { #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 u8 reserved0[3]; __le32 max_seq_size; __le32 max_send_pdu_length; __le32 max_recv_pdu_length; __le32 first_seq_length; __le32 exp_stat_sn; + union dif_configuration_params dif_on_imme_params; }; /* iSCSI CQ element */ @@ -1007,7 +1093,7 @@ struct iscsi_cqe_solicited { u8 fw_dbg_field; u8 caused_conn_err; u8 reserved0[3]; - __le32 reserved1[1]; + __le32 
data_truncated_bytes; union iscsi_task_hdr iscsi_hdr; }; @@ -1019,7 +1105,8 @@ struct iscsi_cqe_unsolicited { __le16 reserved0; u8 reserved1; u8 unsol_cqe_type; - struct regpair rqe_opaque; + __le16 rqe_opaque; + __le16 reserved2[3]; union iscsi_task_hdr iscsi_hdr; }; @@ -1053,22 +1140,22 @@ enum iscsi_cqe_unsolicited_type { /* iscsi debug modes */ struct iscsi_debug_modes { u8 flags; -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 }; /* iSCSI kernel completion queue IDs */ @@ -1080,9 +1167,9 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, + ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, - RESERVED9, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, @@ -1158,6 +1245,7 @@ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, + ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, MAX_ISCSI_RAMROD_CMD_ID }; @@ -1194,6 +1282,16 @@ struct iscsi_spe_conn_offload_option2 { struct tcp_offload_params_opt2 tcp; }; +/* iSCSI collect connection statistics request */ +struct iscsi_spe_conn_statistics { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 reset_stats; + u8 reserved0[7]; + struct regpair stats_cnts_addr; +}; + /* iSCSI connection termination request */ struct iscsi_spe_conn_termination { struct iscsi_slow_path_hdr hdr; @@ -1220,12 +1318,14 @@ struct iscsi_spe_func_init { u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 ll2_rx_queue_id; - u8 ooo_enable; + u8 flags; +#define 
ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F +#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 struct iscsi_debug_modes debug_mode; __le16 reserved1; __le32 reserved2; - __le32 reserved3; - __le32 reserved4; struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; }; @@ -1242,6 +1342,7 @@ enum iscsi_task_type { ISCSI_TASK_TYPE_TARGET_READ, ISCSI_TASK_TYPE_TARGET_RESPONSE, ISCSI_TASK_TYPE_LOGIN_RESPONSE, + ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, MAX_ISCSI_TASK_TYPE }; @@ -1326,6 +1427,7 @@ struct iscsi_xhqe { /* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; + struct regpair iscsi_rx_dup_ack_cnt; }; /* Per PF iSCSI transmit path statistics - pStorm RAM structure */ @@ -1339,6 +1441,9 @@ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; struct regpair iscsi_rx_new_ooo_isle_events_cnt; + struct regpair iscsi_rx_tcp_payload_bytes_cnt; + struct regpair iscsi_rx_tcp_pkt_cnt; + struct regpair iscsi_rx_pure_ack_cnt; __le32 iscsi_cmdq_threshold_cnt; __le32 iscsi_rq_threshold_cnt; __le32 iscsi_immq_threshold_cnt; @@ -1355,6 +1460,8 @@ struct ustorm_iscsi_stats_drv { struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; + struct regpair iscsi_tx_pure_ack_cnt; + struct regpair iscsi_tx_delayed_ack_cnt; }; /* Per PF iSCSI transmit path statistics - yStorm RAM structure */ @@ -1362,6 +1469,8 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; struct regpair iscsi_tx_total_pdu_cnt; + struct regpair iscsi_tx_tcp_payload_bytes_cnt; + struct regpair iscsi_tx_tcp_pkt_cnt; }; struct e4_tstorm_iscsi_task_ag_ctx { diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d60de4a39810..147d08ccf813 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -61,6 +61,35 @@ struct qed_txq_start_ret_params { void *p_handle; }; +enum qed_filter_config_mode { + QED_FILTER_CONFIG_MODE_DISABLE, + QED_FILTER_CONFIG_MODE_5_TUPLE, + QED_FILTER_CONFIG_MODE_L4_PORT, + QED_FILTER_CONFIG_MODE_IP_DEST, +}; + +struct qed_ntuple_filter_params { + /* Physically mapped address containing header of buffer to be used + * as filter. + */ + dma_addr_t addr; + + /* Length of header in bytes */ + u16 length; + + /* Relative queue-id to receive classified packet */ +#define QED_RFS_NTUPLE_QID_RSS ((u16)-1) + u16 qid; + + /* Identifier can either be according to vport-id or vfid */ + bool b_is_vf; + u8 vport_id; + u8 vf_id; + + /* true iff this filter is to be added. 
Else to be removed */ + bool b_is_add; +}; + struct qed_dev_eth_info { struct qed_dev_info common; @@ -316,13 +345,12 @@ struct qed_eth_ops { int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); - int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, - dma_addr_t mapping, u16 length, - u16 vport_id, u16 rx_queue_id, - bool add_filter); + int (*ntuple_filter_config)(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params); int (*configure_arfs_searcher)(struct qed_dev *cdev, - bool en_searcher); + enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); }; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 310442f4b46d..15e398c7230e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -244,16 +244,11 @@ struct qed_fcoe_pf_params { /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; - u64 bdq_pbl_base_addr[2]; - u32 max_cwnd; + u64 bdq_pbl_base_addr[3]; u16 cq_num_entries; u16 cmdq_num_entries; u32 two_msl_timer; - u16 dup_ack_threshold; u16 tx_sws_timer; - u16 min_rto; - u16 min_rto_rt; - u16 max_rto; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments @@ -264,8 +259,8 @@ struct qed_iscsi_pf_params { /* The following parameters are used during protocol-init */ u16 half_way_close_timeout; - u16 bdq_xoff_threshold[2]; - u16 bdq_xon_threshold[2]; + u16 bdq_xoff_threshold[3]; + u16 bdq_xon_threshold[3]; u16 cmdq_xoff_threshold; u16 cmdq_xon_threshold; u16 rq_buffer_size; @@ -281,10 +276,11 @@ struct qed_iscsi_pf_params { u8 gl_cmd_pi; u8 debug_mode; u8 ll2_ooo_queue_id; - u8 ooo_enable; u8 is_target; - u8 bdq_pbl_num_entries[2]; + u8 is_soc_en; + u8 soc_num_of_blocks_log; + u8 bdq_pbl_num_entries[3]; }; struct qed_rdma_pf_params { diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 111e606a74c8..d0df1bec5357 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -102,7 +102,6 @@ struct qed_iscsi_params_offload { u32 ss_thresh; u16 srtt; u16 rtt_var; - u32 ts_time; u32 ts_recent; u32 ts_recent_age; u32 total_rt; @@ -124,7 +123,6 @@ struct qed_iscsi_params_offload { u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; - u32 ts_ticks_per_second; u16 da_timeout_value; u8 ack_frequency; }; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index e755954d85fd..266c1fb45387 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data { u32 opaque_data_1; /* GSI only */ - u32 gid_dst[4]; + u32 src_qp; u16 qp_id; union { diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index f8c7b408e842..505c0b48a761 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -37,21 +37,45 @@ /* SCSI CONSTANTS */ /*********************/ -#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) +#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) #define BDQ_NUM_RESOURCES (4) #define BDQ_ID_RQ (0) #define BDQ_ID_IMM_DATA (1) -#define BDQ_NUM_IDS (2) +#define BDQ_ID_TQ (2) +#define BDQ_NUM_IDS (3) #define SCSI_NUM_SGES_SLOW_SGL_THR 8 #define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) +/* SCSI op codes */ +#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) +#define SCSI_OPCODE_READ_10 (0x28) +#define SCSI_OPCODE_WRITE_6 (0x0A) +#define 
SCSI_OPCODE_WRITE_10 (0x2A) +#define SCSI_OPCODE_WRITE_12 (0xAA) +#define SCSI_OPCODE_WRITE_16 (0x8A) +#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) +#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) +#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) + +/* iSCSI Drv opaque */ +struct iscsi_drv_opaque { + __le16 reserved_zero[3]; + __le16 opaque; +}; + +/* Scsi 2B/8B opaque union */ +union scsi_opaque { + struct regpair fcoe_opaque; + struct iscsi_drv_opaque iscsi_opaque; +}; + /* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; - struct regpair opaque; + union scsi_opaque opaque; }; /* Scsi Drv BDQ struct */ @@ -101,21 +125,24 @@ struct scsi_init_func_queues { #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 + __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; u8 cmdq_sb_pi; - __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; - __le16 reserved0; u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + u8 reserved1; struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; - __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xoff_threshold; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xon_threshold; - __le32 reserved1; }; /* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ @@ -147,4 +174,9 @@ struct scsi_terminate_extra_params { u8 reserved[4]; }; +/* SCSI Task Queue Element */ +struct scsi_tqe { + __le16 itid; +}; + #endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 65b95fd25101..4a4845193539 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -79,24 +79,29 @@ struct tcp_offload_params { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; + __le16 flags; #define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 #define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 #define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 #define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 #define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 -#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 +#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F +#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 u8 ip_version; + u8 
reserved0[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -108,17 +113,21 @@ struct tcp_offload_params { u8 rcv_wnd_scale; u8 connect_mode; __le16 srtt; - __le32 cwnd; __le32 ss_thresh; - __le16 reserved1; + __le32 rcv_wnd; + __le32 cwnd; u8 ka_max_probe_cnt; u8 dup_ack_theshold; + __le16 reserved1; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; - __le32 rcv_wnd; __le32 snd_wl1; __le32 ts_recent; __le32 ts_recent_age; @@ -131,14 +140,10 @@ struct tcp_offload_params { u8 rt_cnt; __le16 rtt_var; __le16 fw_internal; - __le32 ka_timeout; - __le32 ka_interval; - __le32 max_rt_time; - __le32 initial_rcv_wnd; u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 reserved3[2]; + __le32 reserved3; }; /* tcp offload parameters */ @@ -150,16 +155,19 @@ struct tcp_offload_params_opt2 { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; + __le16 flags; #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 u8 ip_version; + u8 reserved1[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -173,7 +181,13 @@ struct tcp_offload_params_opt2 { __le16 syn_ip_payload_length; __le32 syn_phy_addr_lo; __le32 syn_phy_addr_hi; - __le32 reserved1[22]; + __le32 cwnd; + u8 ka_max_probe_cnt; + u8 reserved2[3]; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 reserved3[16]; }; /* tcp IPv4/IPv6 enum */ -- cgit v1.2.3 From ea4efe25ec937a7bc1108e26261c9dd4082375af Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 29 Dec 2017 12:46:27 +0000 Subject: net: phy: marvell10g: add MDI swap reporting Add reporting of the MDI swap to the Marvell 10G PHY driver by providing a generic implementation for the standard 10GBASE-T pair swap register and polarity register. We also support reading the MDI swap status for 1G and below from a PCS register. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index c4b4715caa21..bc379a408c4f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -901,6 +901,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev); int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); +int genphy_c45_read_mdix(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { -- cgit v1.2.3 From 8c5e850c0ce597cf5934beba8b55f7651d2cd6da Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 29 Dec 2017 12:46:38 +0000 Subject: net: phy: add helper to convert negotiation result to phy settings Add a helper to convert the result of the autonegotiation advertisment into the PHYs speed and duplex settings. 
If the result is full duplex, also extract the pause mode settings from the link partner advertisment. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index bc379a408c4f..a052e3768422 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -690,6 +690,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask, size_t maxbit); +void phy_resolve_aneg_linkmode(struct phy_device *phydev); + /** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. -- cgit v1.2.3 From f10fcbcf91006a3223cffbc413b5abe17cf46937 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 29 Dec 2017 12:15:28 +0000 Subject: sfp: improve support for direct-attach copper cables Improve the support for direct-attach copper so that we avoid kernel warning messages, and report the appropriate PORT_DA type to userspace. Direct Attach cables can use a number of protocols depending on their range of speeds. Signed-off-by: Russell King Signed-off-by: David S. Miller --- include/linux/sfp.h | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 0c5c5f6ae1ec..e724d5a3dd80 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -165,7 +165,41 @@ struct sfp_eeprom_base { char vendor_rev[4]; union { __be16 optical_wavelength; - u8 cable_spec; + __be16 cable_compliance; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_2:6; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 reserved60_2:6; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed passive; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_4:4; + u8 fc_pi_4_lim:1; + u8 sff8431_lim:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_lim:1; + u8 fc_pi_4_lim:1; + u8 reserved60_4:4; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed active; } __packed; u8 reserved62; u8 cc_base; -- cgit v1.2.3 From d2b977939b18ce7ff58aef56c3d2a8c64104197c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 2 Jan 2018 17:25:20 +0000 Subject: net: phy: fixed-phy: remove fixed_phy_update_state() mvneta is the only user of fixed_phy_update_state(), which has been converted to use phylink instead. Remove fixed_phy_update_state(). Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- include/linux/phy_fixed.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index cf6392de6eb0..ee54453a40a0 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); -extern int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status, @@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } -static inline int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed) -{ - return -ENODEV; -} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ -- cgit v1.2.3 From 34dc08e4be208539b7c4aa8154a610e1736705e8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 2 Jan 2018 10:58:27 +0000 Subject: net: mdiobus: add unlocked accessors Add unlocked versions of the bus accessors, which allows access to the bus with all the tracing. These accessors validate that the bus mutex is held, which is a basic requirement for all mii bus accesses. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- include/linux/mdio.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 268aad47ecd3..2cfffe586885 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -262,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); + int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); -- cgit v1.2.3 From 788f9933db6172801336d0ae2dec5bdc7525389f Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 2 Jan 2018 10:58:37 +0000 Subject: net: phy: add unlocked accessors Add unlocked versions of the bus accessors, which allows access to the bus with all the tracing. These accessors validate that the bus mutex is held, which is a basic requirement for all mii bus accesses. Also added is a read-modify-write unlocked accessor with the same locking requirements. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index a052e3768422..0c5a28520c65 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -717,6 +717,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } +/** + * __phy_read - convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ + return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); +} + /** * phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct @@ -732,6 +744,22 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } +/** + * __phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, + val); +} + +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); + /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct -- cgit v1.2.3 From 78ffc4acceff48522b92d8fbf8f4a0ffe78838b2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 2 Jan 2018 10:58:43 +0000 Subject: net: phy: add paged phy register accessors Add a set of paged phy register accessors which are inherently safe in their design against other accesses interfering with the paged access. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 0c5a28520c65..af1a740dafd4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -634,6 +634,9 @@ struct phy_driver { int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, u16 val); + int (*read_page)(struct phy_device *dev); + int (*write_page)(struct phy_device *dev, int page); + /* Get the size and type of the eeprom contained within a plug-in * module */ int (*module_info)(struct phy_device *dev, @@ -838,6 +841,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); +int phy_save_page(struct phy_device *phydev); +int phy_select_page(struct phy_device *phydev, int page); +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); -- cgit v1.2.3 From 2b74e5be17d25fbca4be236a19efcd2ecae81cb2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 2 Jan 2018 10:58:53 +0000 Subject: net: phy: add phy_modify() accessor Add phy_modify() convenience accessor to complement the mdiobus counterpart. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/linux/phy.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index af1a740dafd4..135aba5c3d39 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -762,6 +762,7 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) } int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq -- cgit v1.2.3 From 5f103c5d4dbadec0f2cacd39b6429e1b8a8cf983 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 3 Jan 2018 17:57:56 -0800 Subject: bpf: only build sockmap with CONFIG_INET The sockmap infrastructure is only aware of TCP sockets at the moment. In the future we plan to add UDP. In both cases CONFIG_NET should be built-in. So lets only build sockmap if CONFIG_INET is enabled. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 +- include/linux/bpf_types.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7810ae57b357..9e03046d1df2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -554,7 +554,7 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) } #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); #else diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 978c1d9c9383..19b8349a3809 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#ifdef CONFIG_STREAM_PARSER +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) -- cgit v1.2.3 From b17c6b1f4538b66e9bac6710b57b343c2aafadea Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Fri, 29 Dec 2017 13:38:23 +0100 Subject: net: dsa: lan9303: phy_addr_sel_strap rename and retype chip->phy_addr_sel_strap is declared as a bool, but is also used as an integer address base. Rename 'phy_addr_sel_strap' to 'phy_addr_base', and change type to int. Signed-off-by: Egil Hjelmeland Signed-off-by: David S. 
Miller --- include/linux/dsa/lan9303.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index b6514c29563f..b4f22112ba75 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -23,7 +23,7 @@ struct lan9303 { struct regmap_irq_chip_data *irq_data; struct gpio_desc *reset_gpio; u32 reset_duration; /* in [ms] */ - bool phy_addr_sel_strap; + int phy_addr_base; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ struct mutex alr_mutex; /* protect ALR access */ -- cgit v1.2.3 From aecd67b60722dd24353b0bc50e78a55b30707dcd Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 3 Jan 2018 11:25:13 +0100 Subject: xdp: base API for new XDP rx-queue info concept This patch only introduce the core data structures and API functions. All XDP enabled drivers must use the API before this info can used. There is a need for XDP to know more about the RX-queue a given XDP frames have arrived on. For both the XDP bpf-prog and kernel side. Instead of extending xdp_buff each time new info is needed, the patch creates a separate read-mostly struct xdp_rxq_info, that contains this info. We stress this data/cache-line is for read-only info. This is NOT for dynamic per packet info, use the data_meta for such use-cases. The performance advantage is this info can be setup at RX-ring init time, instead of updating N-members in xdp_buff. A possible (driver level) micro optimization is that xdp_buff->rxq assignment could be done once per XDP/NAPI loop. The extra pointer deref only happens for program needing access to this info (thus, no slowdown to existing use-cases). Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 2b0df2703671..425056c7f96c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,6 +20,7 @@ #include #include +#include #include #include @@ -503,6 +504,7 @@ struct xdp_buff { void *data_end; void *data_meta; void *data_hard_start; + struct xdp_rxq_info *rxq; }; /* Compute the linear packet data range [data, data_end) which -- cgit v1.2.3 From e817f85652c14d78f170b18797e4c477c78949e0 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 3 Jan 2018 11:26:09 +0100 Subject: xdp: generic XDP handling of xdp_rxq_info Hook points for xdp_rxq_info: * reg : netif_alloc_rx_queues * unreg: netif_free_rx_queues The net_device have some members (num_rx_queues + real_num_rx_queues) and data-area (dev->_rx with struct netdev_rx_queue's) that were primarily used for exporting information about RPS (CONFIG_RPS) queues to sysfs (CONFIG_SYSFS). For generic XDP extend struct netdev_rx_queue with the xdp_rxq_info, and remove some of the CONFIG_SYSFS ifdefs. 
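A minimal driver-side sketch of the same registration for native XDP drivers, which are expected to do in their own RX-ring setup what netif_alloc_rx_queues()/netif_free_rx_queues() now do for generic XDP. The helper names xdp_rxq_info_reg()/xdp_rxq_info_unreg() and the ring structure below are assumptions for illustration only; just struct xdp_rxq_info and the xdp_buff->rxq member come from this series:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical per-RX-ring driver state; only the xdp_rxq member matters. */
struct mydrv_rx_ring {
	struct net_device *netdev;
	u32 queue_index;
	struct xdp_rxq_info xdp_rxq;	/* read-mostly, filled at ring init */
};

static int mydrv_rx_ring_init(struct mydrv_rx_ring *ring)
{
	/* Register once at RX-ring init time (assumed helper name). */
	return xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
				ring->queue_index);
}

static void mydrv_rx_ring_free(struct mydrv_rx_ring *ring)
{
	xdp_rxq_info_unreg(&ring->xdp_rxq);	/* assumed helper name */
}

/* In the NAPI poll loop the only per-packet (or per-loop) cost is one
 * pointer store into the xdp_buff. */
static void mydrv_build_xdp_buff(struct mydrv_rx_ring *ring,
				 struct xdp_buff *xdp, void *data, int len)
{
	xdp->data_hard_start = data;	/* no extra headroom in this sketch */
	xdp->data = data;
	xdp->data_end = data + len;
	xdp->data_meta = data;		/* empty metadata */
	xdp->rxq = &ring->xdp_rxq;
}
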
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 49bfc6eec74c..440b000f07f4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -44,6 +44,7 @@ #include #endif #include +#include #include #include @@ -686,6 +687,7 @@ struct netdev_rx_queue { #endif struct kobject kobj; struct net_device *dev; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; /* -- cgit v1.2.3 From 8c873e2199700c2de7dbd5eedb9d90d5f109462b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 1 Dec 2017 00:21:04 +0100 Subject: netfilter: core: free hooks with call_rcu Giuseppe Scrivano says: "SELinux, if enabled, registers for each new network namespace 6 netfilter hooks." Cost for this is high. With synchronize_net() removed: "The net benefit on an SMP machine with two cores is that creating a new network namespace takes -40% of the original time." This patch replaces synchronize_net+kvfree with call_rcu(). We store rcu_head at the tail of a structure that has no fixed layout, i.e. we cannot use offsetof() to compute the start of the original allocation. Thus store this information right after the rcu head. We could simplify this by just placing the rcu_head at the start of struct nf_hook_entries. However, this structure is used in packet processing hotpath, so only place what is needed for that at the beginning of the struct. Reported-by: Giuseppe Scrivano Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b24e9b101651..792f6d535707 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -77,17 +77,28 @@ struct nf_hook_entry { void *priv; }; +struct nf_hook_entries_rcu_head { + struct rcu_head head; + void *allocation; +}; + struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; - /* trailer: pointers to original orig_ops of each hook. - * - * This is not part of struct nf_hook_entry since its only - * needed in slow path (hook register/unregister). + /* trailer: pointers to original orig_ops of each hook, + * followed by rcu_head and scratch space used for freeing + * the structure via call_rcu. * + * This is not part of struct nf_hook_entry since its only + * needed in slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] + * + * For the same reason, we store this at end -- its + * only needed when a hook is deleted, not during + * packet path processing: + * struct nf_hook_entries_rcu_head head */ }; -- cgit v1.2.3 From b0f38338aef2dae5ade3c16acf713737e3b15a73 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 3 Dec 2017 00:58:47 +0100 Subject: netfilter: reduce size of hook entry point locations struct net contains: struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; which store the hook entry point locations for the various protocol families and the hooks. Using array results in compact c code when doing accesses, i.e. x = rcu_dereference(net->nf.hooks[pf][hook]); but its also wasting a lot of memory, as most families are not used. So split the array into those families that are used, which are only 5 (instead of 13). In most cases, the 'pf' argument is constant, i.e. 
gcc removes switch statement. struct net before: /* size: 5184, cachelines: 81, members: 46 */ after: /* size: 4672, cachelines: 73, members: 46 */ Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 792f6d535707..9dcbcdfa3b82 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -195,7 +195,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - struct nf_hook_entries *hook_head; + struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef HAVE_JUMP_LABEL @@ -206,7 +206,27 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, #endif rcu_read_lock(); - hook_head = rcu_dereference(net->nf.hooks[pf][hook]); + switch (pf) { + case NFPROTO_IPV4: + hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); + break; + case NFPROTO_IPV6: + hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); + break; + case NFPROTO_ARP: + hook_head = rcu_dereference(net->nf.hooks_arp[hook]); + break; + case NFPROTO_BRIDGE: + hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); + break; + case NFPROTO_DECNET: + hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); + break; + default: + WARN_ON_ONCE(1); + break; + } + if (hook_head) { struct nf_hook_state state; -- cgit v1.2.3 From e58f33cc84bc089c430ac955f3cad6380ae98591 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 7 Dec 2017 16:28:23 +0100 Subject: netfilter: add defines for arp/decnet max hooks The kernel already has defines for this, but they are in uapi exposed headers. Including these from netns.h causes build errors and also adds unneeded dependencies on heads that we don't need. So move these defines to netfilter_defs.h and place the uapi ones in ifndef __KERNEL__ to keep them for userspace. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_defs.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index dc6111adea06..fdcdf2bf34df 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h @@ -7,4 +7,10 @@ /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ #define NF_MAX_HOOKS 8 +/* in/out/forward only */ +#define NF_ARP_NUMHOOKS 3 + +/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ +#define NF_DN_NUMHOOKS 7 + #endif -- cgit v1.2.3 From bb4badf3a3dc81190f7c1c1fa063cdefb18df45f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 7 Dec 2017 16:28:25 +0100 Subject: netfilter: don't allocate space for decnet hooks unless needed no need to define hook points if the family isn't supported. 
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 9dcbcdfa3b82..ce4e91df8b56 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -219,9 +219,11 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, case NFPROTO_BRIDGE: hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); break; +#if IS_ENABLED(CONFIG_DECNET) case NFPROTO_DECNET: hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); break; +#endif default: WARN_ON_ONCE(1); break; -- cgit v1.2.3 From 2a95183a5e0375df756efb2ca37602d71e8455f9 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 7 Dec 2017 16:28:26 +0100 Subject: netfilter: don't allocate space for arp/bridge hooks unless needed no need to define hook points if the family isn't supported. Because we need these hooks for either nftables, arp/ebtables or the 'call-iptables' hack we have in the bridge layer add two new dependencies, NETFILTER_FAMILY_{ARP,BRIDGE}, and have the users select them. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index ce4e91df8b56..ee7a9cbd8d81 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -214,10 +214,14 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; case NFPROTO_ARP: +#ifdef CONFIG_NETFILTER_FAMILY_ARP hook_head = rcu_dereference(net->nf.hooks_arp[hook]); +#endif break; case NFPROTO_BRIDGE: +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); +#endif break; #if IS_ENABLED(CONFIG_DECNET) case NFPROTO_DECNET: -- cgit v1.2.3 From 256d94ba33070564f3e98ded435d917a1bbc5e82 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 7 Dec 2017 16:28:27 +0100 Subject: netfilter: reduce NF_MAX_HOOKS define This can be same as NF_INET_NUMHOOKS if we don't support DECNET. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_defs.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index fdcdf2bf34df..8dddfb151f00 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h @@ -4,13 +4,17 @@ #include -/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ -#define NF_MAX_HOOKS 8 - /* in/out/forward only */ #define NF_ARP_NUMHOOKS 3 /* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ #define NF_DN_NUMHOOKS 7 +#if IS_ENABLED(CONFIG_DECNET) +/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ +#define NF_MAX_HOOKS NF_DN_NUMHOOKS +#else +#define NF_MAX_HOOKS NF_INET_NUMHOOKS +#endif + #endif -- cgit v1.2.3 From 03d13b6868a261f24fbc82b6a2d5823df8d075d3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 8 Dec 2017 17:01:53 +0100 Subject: netfilter: xtables: add and use xt_request_find_table_lock currently we always return -ENOENT to userspace if we can't find a particular table, or if the table initialization fails. 
Followup patch will make nat table init fail in case nftables already registered a nat hook so this change makes xt_find_table_lock return an ERR_PTR to return the errno value reported from the table init function. Add xt_request_find_table_lock as try_then_request_module replacement and use it where needed. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/x_tables.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 33f7530f96b9..1313b35c3ab7 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); +struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, + const char *name); void xt_table_unlock(struct xt_table *t); int xt_proto_init(struct net *net, u_int8_t af); -- cgit v1.2.3 From f92b40a8b2645af38bd6814651c59c1e690db53d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 8 Dec 2017 17:01:54 +0100 Subject: netfilter: core: only allow one nat hook per hook point The netfilter NAT core cannot deal with more than one NAT hook per hook location (prerouting, input ...), because the NAT hooks install a NAT null binding in case the iptables nat table (iptable_nat hooks) or the corresponding nftables chain (nft nat hooks) doesn't specify a nat transformation. Null bindings are needed to detect port collsisions between NAT-ed and non-NAT-ed connections. This causes nftables NAT rules to not work when iptable_nat module is loaded, and vice versa because nat binding has already been attached when the second nat hook is consulted. The netfilter core is not really the correct location to handle this (hooks are just hooks, the core has no notion of what kinds of side effects a hook implements), but its the only place where we can check for conflicts between both iptables hooks and nftables hooks without adding dependencies. So add nat annotation to hook_ops to describe those hooks that will add NAT bindings and then make core reject if such a hook already exists. The annotation fills a padding hole, in case further restrictions appar we might change this to a 'u8 type' instead of bool. iptables error if nft nat hook active: iptables -t nat -A POSTROUTING -j MASQUERADE iptables v1.4.21: can't initialize iptables table `nat': File exists Perhaps iptables or your kernel needs to be upgraded. nftables error if iptables nat table present: nft -f /etc/nftables/ipv4-nat /usr/etc/nftables/ipv4-nat:3:1-2: Error: Could not process rule: File exists table nat { ^^ Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index ee7a9cbd8d81..85a0b0d599e6 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -67,6 +67,7 @@ struct nf_hook_ops { struct net_device *dev; void *priv; u_int8_t pf; + bool nat_hook; unsigned int hooknum; /* Hooks are ordered in ascending priority. 
*/ int priority; -- cgit v1.2.3 From ef71fe27ec2f1607e38af160ab261a8d8ef8e121 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 27 Nov 2017 21:55:14 +0100 Subject: netfilter: move checksum indirection to struct nf_ipv6_ops We cannot make a direct call to nf_ip6_checksum() because that would result in autoloading the 'ipv6' module because of symbol dependencies. Therefore, define checksum indirection in nf_ipv6_ops where this really belongs to. For IPv4, we can indeed make a direct function call, which is faster, given IPv4 is built-in in the networking code by default. Still, CONFIG_INET=n and CONFIG_NETFILTER=y is possible, so define empty inline stub for IPv4 in such case. Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 19 +++---------------- include/linux/netfilter_ipv4.h | 10 ++++++++++ include/linux/netfilter_ipv6.h | 2 ++ 3 files changed, 15 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 85a0b0d599e6..4c4d38ef1a76 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -311,8 +311,6 @@ struct nf_queue_entry; struct nf_afinfo { unsigned short family; - __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, @@ -333,20 +331,9 @@ static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) return rcu_dereference(nf_afinfo[family]); } -static inline __sum16 -nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum(skb, hook, dataoff, protocol); - rcu_read_unlock(); - return csum; -} +__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol, + unsigned short family); static inline __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 98c03b2462b5..c7deb78bac88 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -7,6 +7,16 @@ #include int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); + +#ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); +#else +static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol) +{ + return 0; +} +#endif /* CONFIG_INET */ + #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 47c6b04c28c0..b136101b5cde 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -19,6 +19,8 @@ struct nf_ipv6_ops { void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); }; #ifdef CONFIG_NETFILTER -- cgit v1.2.3 From f7dcbe2f36a660140ecb286e15f502028d96ffdf Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 20 Dec 2017 16:04:18 +0100 Subject: netfilter: move checksum_partial indirection to struct nf_ipv6_ops We cannot make a direct call to 
nf_ip6_checksum_partial() because that would result in autoloading the 'ipv6' module because of symbol dependencies. Therefore, define checksum_partial indirection in nf_ipv6_ops where this really belongs to. For IPv4, we can indeed make a direct function call, which is faster, given IPv4 is built-in in the networking code by default. Still, CONFIG_INET=n and CONFIG_NETFILTER=y is possible, so define empty inline stub for IPv4 in such case. Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 24 +++--------------------- include/linux/netfilter_ipv4.h | 11 +++++++++++ include/linux/netfilter_ipv6.h | 3 +++ 3 files changed, 17 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 4c4d38ef1a76..70b238eff29f 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -311,11 +311,6 @@ struct nf_queue_entry; struct nf_afinfo { unsigned short family; - __sum16 (*checksum_partial)(struct sk_buff *skb, - unsigned int hook, - unsigned int dataoff, - unsigned int len, - u_int8_t protocol); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); void (*saveroute)(const struct sk_buff *skb, @@ -335,22 +330,9 @@ __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family); -static inline __sum16 -nf_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum_partial(skb, hook, dataoff, len, - protocol); - rcu_read_unlock(); - return csum; -} +__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family); int nf_register_afinfo(const struct nf_afinfo *afinfo); void nf_unregister_afinfo(const struct nf_afinfo *afinfo); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index c7deb78bac88..811425ece8d5 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -11,12 +11,23 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type) #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); +__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); #else static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { return 0; } +static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, + unsigned int hook, + unsigned int dataoff, + unsigned int len, + u_int8_t protocol) +{ + return 0; +} #endif /* CONFIG_INET */ #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index b136101b5cde..29e8f1286584 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -21,6 +21,9 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); + __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); }; #ifdef CONFIG_NETFILTER -- cgit v1.2.3 From 
7db9a51e0f9931446ed4231feb1040ed5134fc60 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 20 Dec 2017 16:12:55 +0100 Subject: netfilter: remove saveroute indirection in struct nf_afinfo This is only used by nf_queue.c and this function comes with no symbol dependencies with IPv6, it just refers to structure layouts. Therefore, we can replace it by a direct function call from where it belongs. Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 2 -- include/linux/netfilter_ipv4.h | 10 ++++++++++ include/linux/netfilter_ipv6.h | 9 +++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 70b238eff29f..5fc2443225f9 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -313,8 +313,6 @@ struct nf_afinfo { unsigned short family; int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); - void (*saveroute)(const struct sk_buff *skb, - struct nf_queue_entry *entry); int (*reroute)(struct net *net, struct sk_buff *skb, const struct nf_queue_entry *entry); int route_key_size; diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 811425ece8d5..8d4ef1e3ce74 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,6 +6,16 @@ #include +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); #ifdef CONFIG_INET diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 29e8f1286584..08d58dc018b5 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -9,6 +9,15 @@ #include +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. -- cgit v1.2.3 From 3f87c08c615f567799b426aff0341ea8010a0ebb Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 27 Nov 2017 22:29:52 +0100 Subject: netfilter: move route indirection to struct nf_ipv6_ops We cannot make a direct call to nf_ip6_route() because that would result in autoloading the 'ipv6' module because of symbol dependencies. Therefore, define route indirection in nf_ipv6_ops where this really belongs to. For IPv4, we can indeed make a direct function call, which is faster, given IPv4 is built-in in the networking code by default. Still, CONFIG_INET=n and CONFIG_NETFILTER=y is possible, so define empty inline stub for IPv4 in such case. 
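For illustration, the new out-of-line nf_route() helper can dispatch the same way the checksum helpers converted earlier in this series do; a minimal sketch (not the verbatim implementation), assuming the nf_ipv6_ops pointer is fetched with the existing nf_get_ipv6_ops() accessor:

int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
	     bool strict, unsigned short family)
{
	const struct nf_ipv6_ops *v6ops;
	int ret = 0;

	switch (family) {
	case NFPROTO_IPV4:
		/* IPv4 is built-in, so a direct call is possible */
		ret = nf_ip_route(net, dst, fl, strict);
		break;
	case NFPROTO_IPV6:
		/* ipv6 may be a module, go through the indirection */
		v6ops = nf_get_ipv6_ops();
		if (v6ops)
			ret = v6ops->route(net, dst, fl, strict);
		break;
	}
	return ret;
}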
Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 4 ++-- include/linux/netfilter_ipv4.h | 7 +++++++ include/linux/netfilter_ipv6.h | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 5fc2443225f9..02c35eabd348 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -311,8 +311,6 @@ struct nf_queue_entry; struct nf_afinfo { unsigned short family; - int (*route)(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict); int (*reroute)(struct net *net, struct sk_buff *skb, const struct nf_queue_entry *entry); int route_key_size; @@ -331,6 +329,8 @@ __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol, unsigned short family); +int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict, unsigned short family); int nf_register_afinfo(const struct nf_afinfo *afinfo); void nf_unregister_afinfo(const struct nf_afinfo *afinfo); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 8d4ef1e3ce74..2a4e2c415647 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -24,6 +24,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol); +int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); #else static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) @@ -38,6 +40,11 @@ static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, { return 0; } +static inline int nf_ip_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_INET */ #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 08d58dc018b5..e5700bb314a1 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -33,6 +33,8 @@ struct nf_ipv6_ops { __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol); + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); }; #ifdef CONFIG_NETFILTER -- cgit v1.2.3 From ce388f452f0af2013c657dd24be4415d94e7704f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 27 Nov 2017 22:50:26 +0100 Subject: netfilter: move reroute indirection to struct nf_ipv6_ops We cannot make a direct call to nf_ip6_reroute() because that would result in autoloading the 'ipv6' module because of symbol dependencies. Therefore, define reroute indirection in nf_ipv6_ops where this really belongs to. For IPv4, we can indeed make a direct function call, which is faster, given IPv4 is built-in in the networking code by default. Still, CONFIG_INET=n and CONFIG_NETFILTER=y is possible, so define empty inline stub for IPv4 in such case. 
Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 3 +-- include/linux/netfilter_ipv4.h | 8 ++++++++ include/linux/netfilter_ipv6.h | 3 +++ 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 02c35eabd348..8358e717eabd 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -311,8 +311,6 @@ struct nf_queue_entry; struct nf_afinfo { unsigned short family; - int (*reroute)(struct net *net, struct sk_buff *skb, - const struct nf_queue_entry *entry); int route_key_size; }; @@ -331,6 +329,7 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, u_int8_t protocol, unsigned short family); int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family); +int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); int nf_register_afinfo(const struct nf_afinfo *afinfo); void nf_unregister_afinfo(const struct nf_afinfo *afinfo); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 2a4e2c415647..b31dabfdb453 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -18,6 +18,8 @@ struct ip_rt_info { int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); +struct nf_queue_entry; + #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); @@ -26,6 +28,7 @@ __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, u_int8_t protocol); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); +int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); #else static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) @@ -45,6 +48,11 @@ static inline int nf_ip_route(struct net *net, struct dst_entry **dst, { return -EOPNOTSUPP; } +static inline int nf_ip_reroute(struct sk_buff *skb, + const struct nf_queue_entry *entry) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_INET */ #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index e5700bb314a1..288c597e75b3 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -18,6 +18,8 @@ struct ip6_rt_info { u_int32_t mark; }; +struct nf_queue_entry; + /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. @@ -35,6 +37,7 @@ struct nf_ipv6_ops { u_int8_t protocol); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); + int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); }; #ifdef CONFIG_NETFILTER -- cgit v1.2.3 From 464356234f88518f7d0678b979013e78607e8266 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 27 Nov 2017 22:58:37 +0100 Subject: netfilter: remove route_key_size field in struct nf_afinfo This is only needed by nf_queue, place this code where it belongs. 
Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 8358e717eabd..58a169e425f7 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -311,7 +311,6 @@ struct nf_queue_entry; struct nf_afinfo { unsigned short family; - int route_key_size; }; extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; -- cgit v1.2.3 From b3a61254d83d577f8a44b86a5e68bc124011336a Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 9 Dec 2017 17:05:53 +0100 Subject: netfilter: remove struct nf_afinfo and its helper functions This abstraction has no clients anymore, remove it. This is what remains from previous authors, so correct copyright statement after recent modifications and code removal. Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 58a169e425f7..85a1a0b32c66 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -309,16 +309,6 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); struct flowi; struct nf_queue_entry; -struct nf_afinfo { - unsigned short family; -}; - -extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; -static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) -{ - return rcu_dereference(nf_afinfo[family]); -} - __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family); @@ -330,9 +320,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family); int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); -int nf_register_afinfo(const struct nf_afinfo *afinfo); -void nf_unregister_afinfo(const struct nf_afinfo *afinfo); - #include extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); -- cgit v1.2.3 From 4750005a85f76b3df1e5df19c283dde96b071515 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Sat, 6 Jan 2018 15:22:01 +0100 Subject: netfilter: ipset: Fix "don't update counters" mode when counters used at the matching The matching of the counters was not taken into account, fixed. 
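For illustration, with the packets_op/bytes_op fields added to struct ip_set_ext and the new ip_set_match_counter() helper below, the "match counters" test can now be evaluated independently of the counter update; a minimal sketch of the check as a caller might perform it (the caller context is assumed, not part of this diff):

	/* sketch: counter matching decoupled from counter updating */
	if ((flags & IPSET_FLAG_MATCH_COUNTERS) &&
	    !(ip_set_match_counter(ip_set_get_packets(counter),
				   mext->packets, mext->packets_op) &&
	      ip_set_match_counter(ip_set_get_bytes(counter),
				   mext->bytes, mext->bytes_op)))
		return false;	/* element does not satisfy the counter expression */

	ip_set_update_counter(counter, ext, flags);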
Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set.h | 6 ++++++ include/linux/netfilter/ipset/ip_set_counter.h | 25 +++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 8e42253e5d4d..34fc80f3eb90 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -122,6 +122,8 @@ struct ip_set_ext { u64 bytes; char *comment; u32 timeout; + u8 packets_op; + u8 bytes_op; }; struct ip_set; @@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, const void *e, bool active); +extern bool ip_set_match_extensions(struct ip_set *set, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, + u32 flags, void *data); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h index bb6fba480118..3d33a2c3f39f 100644 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter) return (u64)atomic64_read(&(counter)->packets); } +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + static inline void ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) + const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } - if (flags & IPSET_FLAG_MATCH_COUNTERS) { - mext->packets = ip_set_get_packets(counter); - mext->bytes = ip_set_get_bytes(counter); - } } static inline bool -- cgit v1.2.3 From e3e49ca9b033adbc99aca25db4b46b0eadd7cfb9 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 5 Jan 2018 00:26:46 +0300 Subject: sh_eth: remove sh_eth_plat_data::edmac_endian Since the commit 888cc8c20cf ("sh_eth: remove EDMAC_BIG_ENDIAN") (geez, I didn't realize that was 2 years ago!) the initializers in the SuperH platform code for the 'sh_eth_plat_data::edmac_endian' stopped to matter, so we can remove that field for good (not sure if it was ever useful -- SH7786 Ether has been reported to have the same EDMAC descriptor/register endiannes as configured for the SuperH CPU)... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- include/linux/sh_eth.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index ff3642d267f7..dd5d69323701 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -5,12 +5,9 @@ #include #include -enum {EDMAC_LITTLE_ENDIAN}; - struct sh_eth_plat_data { int phy; int phy_irq; - int edmac_endian; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); -- cgit v1.2.3 From 72dd831e24cc9487a9cd534fdd675fe97e3c1839 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 7 Jan 2018 12:08:35 +0200 Subject: net: Fix netdev_WARN_ONCE macro netdev_WARN_ONCE is broken (whoops..), this fix will remove the unnecessary "condition" parameter, add the missing comma and change "arg" to "args". Fixes: 375ef2b1f0d0 ("net: Introduce netdev_*_once functions") Signed-off-by: Gal Pressman Reviewed-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 440b000f07f4..9415e939f8fe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4409,8 +4409,8 @@ do { \ WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ netdev_reg_state(dev), ##args) -#define netdev_WARN_ONCE(dev, condition, format, arg...) \ - WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \ +#define netdev_WARN_ONCE(dev, format, args...) \ + WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev), \ netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ -- cgit v1.2.3 From e1cfe3d0eb0430f4fb849ef606fb095b6e149853 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 7 Jan 2018 12:08:36 +0200 Subject: net: No line break on netdev_WARN* formatting Remove the unnecessary line break between the netdev name and reg state to the actual message that should be printed. For example, this: [86730.307236] ------------[ cut here ]------------ [86730.313496] netdevice: enp27s0f0 Message from the driver [...] Will be replaced with: [86770.259289] ------------[ cut here ]------------ [86770.265191] netdevice: enp27s0f0: Message from the driver [...] Signed-off-by: Gal Pressman Reviewed-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9415e939f8fe..6f54c58b623b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4406,11 +4406,11 @@ do { \ * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) \ - WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ + WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) #define netdev_WARN_ONCE(dev, format, args...) \ - WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev), \ + WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ -- cgit v1.2.3 From 40817cdbb695de49fb1bfe857b0f440541cb22d8 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 25 Jun 2017 12:38:45 +0300 Subject: net/mlx5: Add hairpin definitions to the FW API Add hairpin definitions to the IFC file. This includes the HCA ID, few HCA hairpin capabilities, new fields in RQ/SQ used later for the pairing and the WQ hairpin data size attribute. 
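For illustration, drivers consume these new cmd_hca_cap fields through the existing MLX5_CAP_GEN() accessor; a hedged sketch of the kind of capability check the follow-up hairpin patches are expected to perform (the local variables and the mdev/peer_mdev pointers are assumptions, only the field names come from the hunk below):

	if (!MLX5_CAP_GEN(mdev, hairpin))
		return -EOPNOTSUPP;

	/* bounds for the data size later requested via wq.log_hairpin_data_sz */
	max_log_sz = MLX5_CAP_GEN(mdev, log_max_hairpin_wq_data_sz);
	min_log_sz = MLX5_CAP_GEN(mdev, log_min_hairpin_wq_data_sz);

	/* peer function id used to fill hairpin_peer_vhca in the RQ/SQ contexts */
	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);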
Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index d44ec5f41d4a..78e36fc2609e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -794,7 +794,10 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x80]; + u8 reserved_at_0[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_40[0x40]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; @@ -1023,12 +1026,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3c0[0x1b]; + u8 hairpin[0x1]; + u8 reserved_at_3c1[0x2]; + u8 log_max_hairpin_queues[0x5]; + u8 reserved_at_3c8[0x3]; + u8 log_max_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3d0[0xb]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb[0x1]; - u8 reserved_at_3e2[0x9]; + u8 reserved_at_3e2[0x1]; + u8 log_min_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3e8[0x3]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; @@ -1162,7 +1172,10 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x15]; + u8 reserved_at_120[0xb]; + u8 log_hairpin_data_sz[0x5]; + u8 reserved_at_130[0x5]; + u8 log_wqe_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; @@ -2482,7 +2495,8 @@ struct mlx5_ifc_sqc_bits { u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2490,7 +2504,13 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_at_60[0x90]; + u8 reserved_at_60[0x8]; + u8 hairpin_peer_rq[0x18]; + + u8 reserved_at_80[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_a0[0x50]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; @@ -2562,7 +2582,8 @@ struct mlx5_ifc_rqc_bits { u8 state[0x4]; u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2576,7 +2597,13 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_80[0x8]; u8 rmpn[0x18]; - u8 reserved_at_a0[0xe0]; + u8 reserved_at_a0[0x8]; + u8 hairpin_peer_sq[0x18]; + + u8 reserved_at_c0[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_e0[0xa0]; struct mlx5_ifc_wq_bits wq; }; -- cgit v1.2.3 From 18e568c390c61682e747178f136bcbc45c707882 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Sun, 12 Nov 2017 15:15:10 +0200 Subject: net/mlx5: Hairpin pair core object setup Low level code to setup hairpin pair core object, deals with: - create hairpin RQs/SQs - destroy hairpin RQs/SQs - modifying hairpin RQs/SQs - pairing (rst2rdy) and unpairing (rdy2rst) Unlike conventional RQs/SQs, the memory used for the packet and descriptor buffers is allocated by the firmware and not the driver. The driver sets the overall data size (log). 
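For illustration, a minimal usage sketch of the API declared in transobj.h below; the mdev pointers, the example data size and the ERR_PTR-style error reporting are assumptions rather than something stated by the patch:

	struct mlx5_hairpin_params params = {
		.log_data_size = 16,		/* example value, must respect the HCA caps */
		.q_counter     = q_counter,	/* assumed to come from the receiving side */
	};
	struct mlx5_hairpin *hp;

	hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	/* hp->rqn and hp->sqn now name the firmware-allocated hairpin queues */
	...
	mlx5_core_hairpin_destroy(hp);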
Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- include/linux/mlx5/transobj.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 88441f5ece25..a228310c1968 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -75,4 +75,23 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen); void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); +struct mlx5_hairpin_params { + u8 log_data_size; + u16 q_counter; +}; + +struct mlx5_hairpin { + struct mlx5_core_dev *func_mdev; + struct mlx5_core_dev *peer_mdev; + + u32 rqn; + u32 sqn; +}; + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params); + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); #endif /* __TRANSOBJ_H__ */ -- cgit v1.2.3 From c5a9f6f0ab4054082dd5ce9bbdaa8e8ff05cf365 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Mon, 17 Jul 2017 13:47:07 +0300 Subject: net/core: Add drop counters to VF statistics Modern hardware can decide to drop packets going to/from a VF. Add receive and transmit drop counters to be displayed at hypervisor layer in iproute2 per VF statistics. Signed-off-by: Eugenia Emantayev Signed-off-by: Saeed Mahameed --- include/linux/if_link.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4c54611e03e9..622658dfbf0a 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -13,6 +13,8 @@ struct ifla_vf_stats { __u64 tx_bytes; __u64 broadcast; __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; }; struct ifla_vf_info { -- cgit v1.2.3 From 5990a30510ed1c37a769d3a035ad2d030b843528 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 4 Jan 2018 11:14:27 +0800 Subject: tun/tap: use ptr_ring instead of skb_array This patch switches to use ptr_ring instead of skb_array. This will be used to enqueue different types of pointers by encoding type into lower bits. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- include/linux/if_tap.h | 6 +++--- include/linux/if_tun.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3ecef57c31e3..8e66866c11be 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -4,7 +4,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); -struct skb_array *tap_get_skb_array(struct file *file); +struct ptr_ring *tap_get_ptr_ring(struct file *file); #else #include #include @@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tap_get_skb_array(struct file *f) +static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } @@ -70,7 +70,7 @@ struct tap_queue { u16 queue_index; bool enabled; struct list_head next; - struct skb_array skb_array; + struct ptr_ring ring; }; rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bf9bdf42d577..bdee9b83baf6 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -19,7 +19,7 @@ #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct skb_array *tun_get_skb_array(struct file *file); +struct ptr_ring *tun_get_tx_ring(struct file *file); #else #include #include @@ -29,7 +29,7 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tun_get_skb_array(struct file *f) +static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } -- cgit v1.2.3 From fc72d1d54dd9ffe2552c76b17e9129803ca7b255 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 4 Jan 2018 11:14:28 +0800 Subject: tuntap: XDP transmission This patch implements XDP transmission for TAP. Since we can't create new queues for TAP during XDP set, exist ptr_ring was reused for queuing XDP buffers. To differ xdp_buff from sk_buff, TUN_XDP_FLAG (0x1UL) was encoded into lowest bit of xpd_buff pointer during ptr_ring_produce, and was decoded during consuming. XDP metadata was stored in the headroom of the packet which should work in most of cases since driver usually reserve enough headroom. Very minor changes were done for vhost_net: it just need to peek the length depends on the type of pointer. Tests were done on two Intel E5-2630 2.40GHz machines connected back to back through two 82599ES. Traffic were generated/received through MoonGen/testpmd(rxonly). It reports ~20% improvements when xdp_redirect_map is doing redirection from ixgbe to TAP (from 2.50Mpps to 3.05Mpps) Cc: Jesper Dangaard Brouer Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- include/linux/if_tun.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bdee9b83baf6..08e66827ad8e 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -17,9 +17,14 @@ #include +#define TUN_XDP_FLAG 0x1UL + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); +bool tun_is_xdp_buff(void *ptr); +void *tun_xdp_to_ptr(void *ptr); +void *tun_ptr_to_xdp(void *ptr); #else #include #include @@ -33,5 +38,17 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline bool tun_is_xdp_buff(void *ptr) +{ + return false; +} +void *tun_xdp_to_ptr(void *ptr) +{ + return NULL; +} +void *tun_ptr_to_xdp(void *ptr) +{ + return NULL; +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ -- cgit v1.2.3 From 430e68d10baf55e4c40d4dd1de8201c1caf5dddd Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Wed, 10 Jan 2018 12:26:06 +0000 Subject: bpf: export function to write into verifier log buffer Rename the BPF verifier `verbose()` to `bpf_verifier_log_write()` and export it, so that other components (in particular, drivers for BPF offload) can reuse the user buffer log to dump error messages at verification time. Renaming `verbose()` was necessary in order to avoid a name so generic to be exported to the global namespace. However to prevent too much pain for backports, the calls to `verbose()` in the kernel BPF verifier were not changed. Instead, use function aliasing to make `verbose` point to `bpf_verifier_log_write`. Another solution could consist in making a wrapper around `verbose()`, but since it is a variadic function, I don't see a clean way without creating two identical wrappers, one for the verifier and one to export. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 2feb218c001d..6b66cd1aa0b9 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -192,6 +192,9 @@ struct bpf_verifier_env { u32 subprog_cnt; }; +__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, + const char *fmt, ...); + static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; -- cgit v1.2.3 From 4c4dbb4a7363ce56c77cf8eb2090f67c1b0aa2cc Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Tue, 9 Jan 2018 16:06:18 -0500 Subject: net/mlx5e: Move dynamic interrupt coalescing code to include/linux This move allows drivers to add private structure elements to track the number of packets, bytes, and interrupts events per ring. A driver also defines a workqueue handler to act on this collected data once per poll and modify the coalescing parameters per ring. Signed-off-by: Andy Gospodarek Acked-by: Tal Gilboa Acked-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- include/linux/net_dim.h | 377 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 377 insertions(+) create mode 100644 include/linux/net_dim.h (limited to 'include/linux') diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h new file mode 100644 index 000000000000..741510fb74cb --- /dev/null +++ b/include/linux/net_dim.h @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include + +struct net_dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct net_dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct net_dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct net_dim { /* Adaptive Moderation */ + u8 state; + struct net_dim_stats prev_stats; + struct net_dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + NET_DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + NET_DIM_START_MEASURE, + NET_DIM_MEASURE_IN_PROGRESS, + NET_DIM_APPLY_NEW_PROFILE, +}; + +enum { + NET_DIM_PARKING_ON_TOP, + NET_DIM_PARKING_TIRED, + NET_DIM_GOING_RIGHT, + NET_DIM_GOING_LEFT, +}; + +enum { + NET_DIM_STATS_WORSE, + NET_DIM_STATS_SAME, + NET_DIM_STATS_BETTER, +}; + +enum { + NET_DIM_STEPPED, + NET_DIM_TOO_TIRED, + NET_DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ +#define NET_DIM_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct net_dim_cq_moder +profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_EQE_PROFILES, + NET_DIM_CQE_PROFILES, +}; + +static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode, + int ix) +{ + struct net_dim_cq_moder cq_moder; + + cq_moder = profile[cq_period_mode][ix]; + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = NET_DIM_DEF_PROFILE_CQE; + else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_profile(rx_cq_period_mode, default_profile_ix); +} + +static inline bool net_dim_on_top(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + return true; + case NET_DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* NET_DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void net_dim_turn(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + dim->tune_state = NET_DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case NET_DIM_GOING_LEFT: + dim->tune_state = NET_DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct net_dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return NET_DIM_TOO_TIRED; + + switch 
(dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return NET_DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case NET_DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return NET_DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return NET_DIM_STEPPED; +} + +static inline void net_dim_park_on_top(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = NET_DIM_PARKING_ON_TOP; +} + +static inline void net_dim_park_tired(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = NET_DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct net_dim *dim) +{ + dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : + NET_DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct net_dim_stats *curr, + struct net_dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + return NET_DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct net_dim_stats *curr_stats, + struct net_dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case NET_DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case NET_DIM_GOING_RIGHT: + case NET_DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_BETTER) + net_dim_turn(dim); + + if (net_dim_on_top(dim)) { + net_dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case NET_DIM_ON_EDGE: + net_dim_park_on_top(dim); + break; + case NET_DIM_TOO_TIRED: + net_dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != NET_DIM_PARKING_ON_TOP) || + (dim->tune_state != NET_DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void net_dim_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct net_dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define NET_DIM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void net_dim_calc_stats(struct net_dim_sample *start, + struct net_dim_sample *end, + struct net_dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, 
start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +static inline void net_dim(struct net_dim *dim, + u16 event_ctr, + u64 packets, + u64 bytes) +{ + struct net_dim_sample end_sample; + struct net_dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case NET_DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), event_ctr, + dim->start_sample.event_ctr); + if (nevents < NET_DIM_NEVENTS) + break; + net_dim_sample(event_ctr, packets, bytes, &end_sample); + net_dim_calc_stats(&dim->start_sample, &end_sample, + &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = NET_DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + /* fall through */ + case NET_DIM_START_MEASURE: + net_dim_sample(event_ctr, packets, bytes, &dim->start_sample); + dim->state = NET_DIM_MEASURE_IN_PROGRESS; + break; + case NET_DIM_APPLY_NEW_PROFILE: + break; + } +} + +#endif /* NET_DIM_H */ -- cgit v1.2.3 From 8115b750dbcbeb3aa412f18b7a8b0aee4d35b079 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Tue, 9 Jan 2018 16:06:19 -0500 Subject: net/dim: use struct net_dim_sample as arg to net_dim Simplify the arguments net_dim() by formatting them into a struct net_dim_sample before calling the function. Signed-off-by: Andy Gospodarek Suggested-by: Tal Gilboa Acked-by: Tal Gilboa Acked-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/net_dim.h | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index 741510fb74cb..1c7e45016120 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -342,21 +342,18 @@ static inline void net_dim_calc_stats(struct net_dim_sample *start, } static inline void net_dim(struct net_dim *dim, - u16 event_ctr, - u64 packets, - u64 bytes) + struct net_dim_sample end_sample) { - struct net_dim_sample end_sample; struct net_dim_stats curr_stats; u16 nevents; switch (dim->state) { case NET_DIM_MEASURE_IN_PROGRESS: - nevents = BIT_GAP(BITS_PER_TYPE(u16), event_ctr, + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, dim->start_sample.event_ctr); if (nevents < NET_DIM_NEVENTS) break; - net_dim_sample(event_ctr, packets, bytes, &end_sample); net_dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); if (net_dim_decision(&curr_stats, dim)) { @@ -366,7 +363,6 @@ static inline void net_dim(struct net_dim *dim, } /* fall through */ case NET_DIM_START_MEASURE: - net_dim_sample(event_ctr, packets, bytes, &dim->start_sample); dim->state = NET_DIM_MEASURE_IN_PROGRESS; break; case NET_DIM_APPLY_NEW_PROFILE: -- cgit v1.2.3 From fd3ba21478d0ca40da2b71850a2cc447516bb7d8 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 9 Jan 2018 23:42:34 +0100 Subject: net: fix xdp_rxq_info build issue when CONFIG_SYSFS is not set The commit e817f85652c1 ("xdp: generic XDP handling of xdp_rxq_info") removed some ifdef CONFIG_SYSFS in net/core/dev.c, but forgot to remove the corresponding ifdef's in include/linux/netdevice.h. Fixes: e817f85652c1 ("xdp: generic XDP handling of xdp_rxq_info") Reported-by: Guenter Roeck Signed-off-by: Jesper Dangaard Brouer Tested-by: Guenter Roeck Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6f54c58b623b..ef7b348e8498 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1805,12 +1805,9 @@ struct net_device { /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; -#ifdef CONFIG_SYSFS struct netdev_rx_queue *_rx; - unsigned int num_rx_queues; unsigned int real_num_rx_queues; -#endif struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; -- cgit v1.2.3 From 1125b008711581a8962ee028e2982d7757093600 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 10 Jan 2018 15:06:14 +1100 Subject: tuntap: fix for "tuntap: XDP transmission" Fixes: fc72d1d54dd9 ("tuntap: XDP transmission") Signed-off-by: Stephen Rothwell Acked-by: Jason Wang Signed-off-by: David S. Miller --- include/linux/if_tun.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 08e66827ad8e..c5b0a75a7812 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -42,11 +42,11 @@ static inline bool tun_is_xdp_buff(void *ptr) { return false; } -void *tun_xdp_to_ptr(void *ptr) +static inline void *tun_xdp_to_ptr(void *ptr) { return NULL; } -void *tun_ptr_to_xdp(void *ptr) +static inline void *tun_ptr_to_xdp(void *ptr) { return NULL; } -- cgit v1.2.3 From 540adea3809f61115d2a1ea4ed6e627613452ba1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 13 Jan 2018 02:55:03 +0900 Subject: error-injection: Separate error-injection from kprobe Since error-injection framework is not limited to be used by kprobes, nor bpf. Other kernel subsystems can use it freely for checking safeness of error-injection, e.g. livepatch, ftrace etc. So this separate error-injection framework from kprobes. Some differences has been made: - "kprobe" word is removed from any APIs/structures. - BPF_ALLOW_ERROR_INJECTION() is renamed to ALLOW_ERROR_INJECTION() since it is not limited for BPF too. - CONFIG_FUNCTION_ERROR_INJECTION is the config item of this feature. It is automatically enabled if the arch supports error injection feature for kprobe or ftrace etc. 
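For illustration, after this rename a function opts in with the generic macro instead of the BPF-specific one; a sketch using open_ctree (one of the functions named later in this series), assuming the single-argument form introduced here (the following patch extends the macro with an error-type argument):

int open_ctree(...)
{
	...
}
/* mark the function as a legitimate error-injection target */
ALLOW_ERROR_INJECTION(open_ctree);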
Signed-off-by: Masami Hiramatsu Reviewed-by: Josef Bacik Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 11 ----------- include/linux/error-injection.h | 21 +++++++++++++++++++++ include/linux/kprobes.h | 1 - include/linux/module.h | 6 +++--- 4 files changed, 24 insertions(+), 15 deletions(-) create mode 100644 include/linux/error-injection.h (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 44f26f6df8fc..3496977203a3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -613,15 +613,4 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -#if defined(__KERNEL__) && !defined(__ASSEMBLY__) -#ifdef CONFIG_BPF_KPROBE_OVERRIDE -#define BPF_ALLOW_ERROR_INJECTION(fname) \ -static unsigned long __used \ - __attribute__((__section__("_kprobe_error_inject_list"))) \ - _eil_addr_##fname = (unsigned long)fname; -#else -#define BPF_ALLOW_ERROR_INJECTION(fname) -#endif -#endif - #endif /* _LINUX_BPF_H */ diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h new file mode 100644 index 000000000000..130a67c50dac --- /dev/null +++ b/include/linux/error-injection.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERROR_INJECTION_H +#define _LINUX_ERROR_INJECTION_H + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + +#include + +extern bool within_error_injection_list(unsigned long addr); + +#else /* !CONFIG_FUNCTION_ERROR_INJECTION */ + +#include +static inline bool within_error_injection_list(unsigned long addr) +{ + return false; +} + +#endif + +#endif /* _LINUX_ERROR_INJECTION_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 963fd364f3d6..9440a2fc8893 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -271,7 +271,6 @@ extern bool arch_kprobe_on_func_entry(unsigned long offset); extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); -extern bool within_kprobe_error_injection_list(unsigned long addr); struct kprobe_insn_cache { struct mutex mutex; diff --git a/include/linux/module.h b/include/linux/module.h index 548fa09fa806..792e51d83bda 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -476,9 +476,9 @@ struct module { unsigned int num_ctors; #endif -#ifdef CONFIG_BPF_KPROBE_OVERRIDE - unsigned int num_kprobe_ei_funcs; - unsigned long *kprobe_ei_funcs; +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + unsigned int num_ei_funcs; + unsigned long *ei_funcs; #endif } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT -- cgit v1.2.3 From 663faf9f7beeaca4ad0176bb96c776eed9dad0c5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Sat, 13 Jan 2018 02:55:33 +0900 Subject: error-injection: Add injectable error types Add injectable error types for each error-injectable function. One motivation of error injection test is to find software flaws, mistakes or mis-handlings of expectable errors. If we find such flaws by the test, that is a program bug, so we need to fix it. But if the tester miss input the error (e.g. just return success code without processing anything), it causes unexpected behavior even if the caller is correctly programmed to handle any errors. That is not what we want to test by error injection. 
To clarify what type of errors the caller must expect for each injectable function, this introduces injectable error types: - EI_ETYPE_NULL : means the function will return NULL if it fails. No ERR_PTR, just a NULL. - EI_ETYPE_ERRNO : means the function will return -ERRNO if it fails. - EI_ETYPE_ERRNO_NULL : means the function will return -ERRNO (ERR_PTR) or NULL. The ALLOW_ERROR_INJECTION() macro is extended to take one of NULL, ERRNO or ERRNO_NULL to record the error type for each function, e.g. ALLOW_ERROR_INJECTION(open_ctree, ERRNO) These error types are shown in debugfs as below. ==== / # cat /sys/kernel/debug/error_injection/list open_ctree [btrfs] ERRNO io_ctl_init [btrfs] ERRNO ==== Signed-off-by: Masami Hiramatsu Reviewed-by: Josef Bacik Signed-off-by: Alexei Starovoitov --- include/linux/error-injection.h | 6 ++++++ include/linux/module.h | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h index 130a67c50dac..280c61ecbf20 100644 --- a/include/linux/error-injection.h +++ b/include/linux/error-injection.h @@ -7,6 +7,7 @@ #include extern bool within_error_injection_list(unsigned long addr); +extern int get_injectable_error_type(unsigned long addr); #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ @@ -16,6 +17,11 @@ static inline bool within_error_injection_list(unsigned long addr) return false; } +static inline int get_injectable_error_type(unsigned long addr) +{ + return EI_ETYPE_NONE; +} + #endif #endif /* _LINUX_ERROR_INJECTION_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 792e51d83bda..9642d3116718 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -477,8 +478,8 @@ struct module { #endif #ifdef CONFIG_FUNCTION_ERROR_INJECTION + struct error_injection_entry *ei_funcs; unsigned int num_ei_funcs; - unsigned long *ei_funcs; #endif } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT -- cgit v1.2.3 From 7fdb61b44c0c95d00f6c856d9fb61a9f647bc85f Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Sun, 14 Jan 2018 12:33:15 +0100 Subject: net: sch: prio: Add offload ability to PRIO qdisc Add the ability to offload PRIO qdisc by using ndo_setup_tc. There are three commands for PRIO offloading: * TC_PRIO_REPLACE: handles set and tune * TC_PRIO_DESTROY: handles qdisc destroy * TC_PRIO_STATS: updates the qdisc's counters (given as reference) Like the RED qdisc, the indication of whether PRIO is being offloaded is set and updated as part of the dump function. This is because the driver could decide to offload or not based on the qdisc parent, which could change without notifying the qdisc. Signed-off-by: Nogah Frankel Reviewed-by: Yuval Mintz Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef7b348e8498..6d95477b962c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -780,6 +780,7 @@ enum tc_setup_type { TC_SETUP_BLOCK, TC_SETUP_QDISC_CBS, TC_SETUP_QDISC_RED, + TC_SETUP_QDISC_PRIO, }; /* These structures hold the attributes of bpf state that are being passed -- cgit v1.2.3 From 1110f3a9bcf394c06b81a98206aee9b6860653c8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 11 Jan 2018 20:29:03 -0800 Subject: bpf: add map_alloc_check callback .map_alloc callbacks contain a number of checks validating user- -provided map attributes against constraints of a particular map type. For offloaded maps we will need to check map attributes without actually allocating any memory on the host. Add a new callback for validating attributes before any memory is allocated. This callback can be selectively implemented by map types for sharing code with offloads, or simply to separate the logical steps of validation and allocation. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3496977203a3..263598619f90 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -25,6 +25,7 @@ struct bpf_map; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ + int (*map_alloc_check)(union bpf_attr *attr); struct bpf_map *(*map_alloc)(union bpf_attr *attr); void (*map_release)(struct bpf_map *map, struct file *map_file); void (*map_free)(struct bpf_map *map); -- cgit v1.2.3 From bd475643d74e8ed78bfd36d941053b0e45974e8e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 11 Jan 2018 20:29:06 -0800 Subject: bpf: add helper for copying attrs to struct bpf_map All map types reimplement the field-by-field copy of union bpf_attr members into struct bpf_map. Add a helper to perform this operation. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 263598619f90..c60ddfb34d41 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -378,6 +378,7 @@ void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); +void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); extern int sysctl_unprivileged_bpf_disabled; -- cgit v1.2.3 From 0a9c1991f285f829fd786fa2a9c824c2a3f87bc6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 11 Jan 2018 20:29:07 -0800 Subject: bpf: rename bpf_dev_offload -> bpf_prog_offload With map offload coming, we need to call program offload structure something less ambiguous. Pure rename, no functional changes. 
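Taken together, the two helpers added just above (the .map_alloc_check callback and bpf_map_init_from_attr()) let a map type separate validation from allocation. A minimal, illustrative sketch (not from these patches; the "example_" names are made up):

#include <linux/bpf.h>

static int example_map_alloc_check(union bpf_attr *attr)
{
	/* Reject obviously bad attributes before any memory is allocated. */
	if (attr->max_entries == 0 || attr->key_size != 4)
		return -EINVAL;
	return 0;
}

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map;

	map = bpf_map_area_alloc(sizeof(*map), NUMA_NO_NODE);
	if (!map)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(map, attr);	/* field-by-field copy helper */
	return map;
}

const struct bpf_map_ops example_map_ops = {
	.map_alloc_check = example_map_alloc_check,
	.map_alloc	 = example_map_alloc,
	/* lookup/update/delete callbacks omitted in this sketch */
};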
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c60ddfb34d41..9fff1ace1d8e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -200,7 +200,7 @@ struct bpf_prog_offload_ops { int insn_idx, int prev_insn_idx); }; -struct bpf_dev_offload { +struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; void *dev_priv; @@ -230,7 +230,7 @@ struct bpf_prog_aux { #ifdef CONFIG_SECURITY void *security; #endif - struct bpf_dev_offload *offload; + struct bpf_prog_offload *offload; union { struct work_struct work; struct rcu_head rcu; -- cgit v1.2.3 From a38845729ea3985db5d2544ec3ef3dc8f6313a27 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 11 Jan 2018 20:29:09 -0800 Subject: bpf: offload: add map offload infrastructure BPF map offload follow similar path to program offload. At creation time users may specify ifindex of the device on which they want to create the map. Map will be validated by the kernel's .map_alloc_check callback and device driver will be called for the actual allocation. Map will have an empty set of operations associated with it (save for alloc and free callbacks). The real device callbacks are kept in map->offload->dev_ops because they have slightly different signatures. Map operations are called in process context so the driver may communicate with HW freely, msleep(), wait() etc. Map alloc and free callbacks are muxed via existing .ndo_bpf, and are always called with rtnl lock held. Maps and programs are guaranteed to be destroyed before .ndo_uninit (i.e. before unregister_netdev() returns). Map callbacks are invoked with bpf_devs_lock *read* locked, drivers must take care of exclusive locking if necessary. All offload-specific branches are marked with unlikely() (through bpf_map_is_dev_bound()), given that branch penalty will be negligible compared to IO anyway, and we don't want to penalize SW path unnecessarily. 
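A rough sketch of the driver side this infrastructure expects (not from the patch; the "exdev_" names are invented): the driver answers the new BPF_OFFLOAD_MAP_ALLOC/FREE commands in .ndo_bpf and hangs its own bpf_map_dev_ops off the offloaded map:

#include <linux/bpf.h>
#include <linux/netdevice.h>

static int exdev_map_lookup(struct bpf_offloaded_map *offmap,
			    void *key, void *value)
{
	/* Process context: the driver may sleep while talking to the device. */
	return -ENOENT;
}

static const struct bpf_map_dev_ops exdev_map_dev_ops = {
	.map_lookup_elem = exdev_map_lookup,
	/* .map_update_elem, .map_delete_elem, .map_get_next_key, ... */
};

static int exdev_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		/* Allocate device state and point bpf->offmap->dev_priv at it. */
		bpf->offmap->dev_ops = &exdev_map_dev_ops;
		return 0;
	case BPF_OFFLOAD_MAP_FREE:
		/* Release whatever MAP_ALLOC set up. */
		return 0;
	default:
		return -EINVAL;
	}
}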
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 59 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/netdevice.h | 6 +++++ 2 files changed, 65 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9fff1ace1d8e..5c2c104dc2c5 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -74,6 +74,33 @@ struct bpf_map { char name[BPF_OBJ_NAME_LEN]; }; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return container_of(map, struct bpf_offloaded_map, map); +} + +extern const struct bpf_map_ops bpf_map_offload_ops; + /* function argument constraints */ enum bpf_arg_type { ARG_DONTCARE = 0, /* unused argument in helper function */ @@ -369,6 +396,7 @@ int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); +void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); @@ -556,6 +584,15 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog); int bpf_prog_offload_info_fill(struct bpf_prog_info *info, struct bpf_prog *prog); +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); + #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); @@ -563,6 +600,14 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { return aux->offload_requested; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return unlikely(map->ops == &bpf_map_offload_ops); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -574,6 +619,20 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { return false; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return false; +} + +static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_map_offload_map_free(struct bpf_map *map) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef7b348e8498..0b3ab42d50fe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h 
@@ -804,6 +804,8 @@ enum bpf_netdev_command { BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY, + BPF_OFFLOAD_MAP_ALLOC, + BPF_OFFLOAD_MAP_FREE, }; struct bpf_prog_offload_ops; @@ -834,6 +836,10 @@ struct netdev_bpf { struct { struct bpf_prog *prog; } offload; + /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ + struct { + struct bpf_offloaded_map *offmap; + }; }; }; -- cgit v1.2.3 From 28b2e0d2cd132284ad69fcea4b7cf6b7d0662c2f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 10 Jan 2018 21:21:31 +0100 Subject: net: phy: remove parameter new_link from phy_mac_interrupt() I see two issues with parameter new_link: 1. It's not needed. See also phy_interrupt(), works w/o this parameter. phy_mac_interrupt sets the state to PHY_CHANGELINK and triggers the state machine which then calls phy_read_status. And phy_read_status updates the link state. 2. phy_mac_interrupt is used in interrupt context and getting the link state may sleep (at least when having to access the PHY registers via MDIO bus). So let's remove it. Signed-off-by: Heiner Kallweit Reviewed-by: Florian Fainelli Tested-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 135aba5c3d39..47715a3115b0 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -964,7 +964,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n, void phy_state_machine(struct work_struct *work); void phy_change(struct phy_device *phydev); void phy_change_work(struct work_struct *work); -void phy_mac_interrupt(struct phy_device *phydev, int new_link); +void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_trigger_machine(struct phy_device *phydev, bool sync); -- cgit v1.2.3 From 2290aefa2e90a43af8555ad6431d49de43259aa3 Mon Sep 17 00:00:00 2001 From: Franklin S Cooper Jr Date: Wed, 10 Jan 2018 16:25:18 +0530 Subject: can: dev: Add support for limiting configured bitrate Various CAN or CAN-FD IP may be able to run at a faster rate than what the transceiver the CAN node is connected to. This can lead to unexpected errors. However, CAN transceivers typically have fixed limitations and provide no means to discover these limitations at runtime. Therefore, add support for a can-transceiver node that can be reused by other CAN peripheral drivers to determine for both CAN and CAN-FD what the max bitrate that can be used. If the user tries to configure CAN to pass these maximum bitrates it will throw an error. Also add support for reading bitrate_max via the netlink interface. 
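On the driver side the new helper is a single call; a hedged sketch (not from the patch; the probe function and priv sizing are illustrative) of where a CAN driver might invoke it so the optional can-transceiver node caps the configurable bitrates:

#include <linux/can/dev.h>
#include <linux/platform_device.h>

static int example_can_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	ndev = alloc_candev(sizeof(struct can_priv), 0);
	if (!ndev)
		return -ENOMEM;

	/* Parses the optional can-transceiver child node (when CONFIG_OF is
	 * set) and fills priv->bitrate_max; bitrate settings above it are
	 * then rejected.
	 */
	of_can_transceiver(ndev);

	return register_candev(ndev);
}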
Reviewed-by: Suman Anna Signed-off-by: Franklin S Cooper Jr [nsekhar@ti.com: fix build error with !CONFIG_OF] Signed-off-by: Sekhar Nori Signed-off-by: Faiz Abbas Signed-off-by: Marc Kleine-Budde --- include/linux/can/dev.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 61f1cf2d9f44..055aaf5ed9af 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -46,6 +46,7 @@ struct can_priv { unsigned int bitrate_const_cnt; const u32 *data_bitrate_const; unsigned int data_bitrate_const_cnt; + u32 bitrate_max; struct can_clock clock; enum can_state state; @@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); +#ifdef CONFIG_OF +void of_can_transceiver(struct net_device *dev); +#else +static inline void of_can_transceiver(struct net_device *dev) { } +#endif + struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); struct sk_buff *alloc_canfd_skb(struct net_device *dev, struct canfd_frame **cfd); -- cgit v1.2.3 From ac8322d806d15fd53fd5d70ceda61e05163392ee Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 12 Jan 2018 21:20:33 +0100 Subject: phy: add helpers for setting/clearing bits in PHY registers Based on the recent introduction of phy_modify add helpers for setting and clearing bits in PHY registers. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- include/linux/phy.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy.h b/include/linux/phy.h index 47715a3115b0..5a0c3e53e7c2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -764,6 +764,55 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +/** + * __phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __phy_modify(phydev, regnum, 0, val); +} + +/** + * __phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, + u16 val) +{ + return __phy_modify(phydev, regnum, val, 0); +} + +/** + * phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + */ +static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, 0, val); +} + +/** + * phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + */ +static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, val, 0); +} + /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq + * @phydev: the phy_device struct -- cgit v1.2.3 From 2d83619de8a64703cd3b0bf21b3b85f081290bf3 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Wed, 17 Jan 2018 09:54:38 +0200 Subject: net/mlx5: Fix build break The latest merge between net and net-next introduced a compiler assert in the mlx5 driver. In hca_cap_bits older fields are kept along with newer fields that should have replaced them. Fixes: c02b3741eb99 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net") Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- include/linux/mlx5/mlx5_ifc.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 94135c03d52b..acd829d8613b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1035,8 +1035,6 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; - u8 disable_local_lb[0x1]; - u8 reserved_at_3e2[0x1]; u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; u8 log_min_hairpin_wq_data_sz[0x5]; -- cgit v1.2.3 From fcfb126defda3cee3f1d9460dbe9a2ccac4fbd21 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Tue, 16 Jan 2018 16:05:19 -0800 Subject: bpf: add new jited info fields in bpf_dev_offload and bpf_prog_info For the host JIT, the "jited_len"/"bpf_func" fields in struct bpf_prog are used by all host JIT targets to get the jited image and its length. For offload, however, targets are likely to have different offload mechanisms, so this info is kept in device private data fields. Therefore, the BPF_OBJ_GET_INFO_BY_FD syscall needs a unified way to get JIT length and contents info for offload targets. One way is to introduce a new callback to parse device private data and then fill those fields in bpf_prog_info. This might be a little heavy; the other way is to add generic fields which will be initialized by all offload targets. This patch follows the second approach and introduces two new fields in struct bpf_dev_offload, teaching bpf_prog_get_info_by_fd about them to fill correct jited_prog_len and jited_prog_insns in bpf_prog_info.
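A minimal sketch (not from the patch; everything except the two new fields is invented) of how an offload driver's translate step could publish its device JIT output through the generic fields so BPF_OBJ_GET_INFO_BY_FD can report it:

#include <linux/bpf.h>

static int exdev_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	void *image;
	u32 len;

	image = exdev_run_device_jit(prog, &len);	/* hypothetical driver JIT */
	if (!image)
		return -ENOMEM;

	offload->jited_image = image;	/* the new generic fields */
	offload->jited_len = len;
	return 0;
}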
Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5c2c104dc2c5..025b1c2f8053 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -234,6 +234,8 @@ struct bpf_prog_offload { struct list_head offloads; bool dev_state; const struct bpf_prog_offload_ops *dev_ops; + void *jited_image; + u32 jited_len; }; struct bpf_prog_aux { -- cgit v1.2.3 From 50bd870a9e5cca9fcf5fb4c130c373643d7d9906 Mon Sep 17 00:00:00 2001 From: Yossef Efraim Date: Sun, 14 Jan 2018 11:39:10 +0200 Subject: xfrm: Add ESN support for IPSec HW offload This patch adds ESN support to IPsec device offload, adding a new xfrm device operation to synchronize device ESN. Signed-off-by: Yossef Efraim Signed-off-by: Shannon Nelson Signed-off-by: Steffen Klassert --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ed0799a12bf2..540151875444 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -851,6 +851,7 @@ struct xfrmdev_ops { void (*xdo_dev_state_free) (struct xfrm_state *x); bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); + void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); }; #endif -- cgit v1.2.3 From 52775b33bb5072fbc07b02c0cf4fe8da1f7ee7cd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 17 Jan 2018 19:13:28 -0800 Subject: bpf: offload: report device information about offloaded maps Tell user space about the device on which the map was created. The unfortunate reality of the user ABI makes sharing this code with program offload difficult, but the information is the same. Signed-off-by: Jakub Kicinski Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 025b1c2f8053..66df387106de 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -586,6 +586,8 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog); int bpf_prog_offload_info_fill(struct bpf_prog_info *info, struct bpf_prog *prog); +int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); int bpf_map_offload_update_elem(struct bpf_map *map, void *key, void *value, u64 flags); -- cgit v1.2.3 From 5165674ff5022d8a76f3147d75cab1fecd529497 Mon Sep 17 00:00:00 2001 From: Talat Batheesh Date: Wed, 17 Jan 2018 23:13:16 +0200 Subject: net/dim: Fix fixpoint divide exception in net_dim_stats_compare Helmut reported a division-by-zero bug hit while running traffic and doing a physical cable pull test. When the cable is unplugged the ppms becomes zero, so dividing the current ppms by the previous ppms in the next dim iteration divides by zero. This patch prevents this division for both ppms and epms. Fixes: c3164d2fc48f ("net/mlx5e: Added BW check for DIM decision mechanism") Fixes: 4c4dbb4a7363 ("net/mlx5e: Move dynamic interrupt coalescing code to include/linux") Reported-by: Helmut Grauer Signed-off-by: Talat Batheesh Signed-off-by: Tal Gilboa Signed-off-by: David S.
Miller --- include/linux/net_dim.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index 1c7e45016120..bebeaad897cc 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -244,10 +244,17 @@ static inline int net_dim_stats_compare(struct net_dim_stats *curr, return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : NET_DIM_STATS_WORSE; + if (!prev->ppms) + return curr->ppms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : NET_DIM_STATS_WORSE; + if (!prev->epms) + return NET_DIM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER : NET_DIM_STATS_WORSE; -- cgit v1.2.3 From ddae74ac103bd35616c2bde5dc4dd66e2519db7a Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 23 Nov 2017 18:19:23 +0200 Subject: net/mlx5: Vectorize the low level core hairpin object Enhance the hairpin setup code at the core to support a set of N (RQ,SQ) pairs. This will be later used by the caller to set RSS spreading among the different RQs. Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- include/linux/mlx5/transobj.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index a228310c1968..1bcd8d5562f0 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -78,14 +78,17 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); struct mlx5_hairpin_params { u8 log_data_size; u16 q_counter; + int num_channels; }; struct mlx5_hairpin { struct mlx5_core_dev *func_mdev; struct mlx5_core_dev *peer_mdev; - u32 rqn; - u32 sqn; + int num_channels; + + u32 *rqn; + u32 *sqn; }; struct mlx5_hairpin * -- cgit v1.2.3 From 4d533e0f86952eb97f66f2c9548313f6e51066c0 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 4 Jan 2018 12:26:21 +0200 Subject: net/mlx5: Enable setting hairpin queue size Allow to specify the size of the hairpin queues along with the packet buffer data size from the core setup code. If the driver doesn't provide this, the FW applies proper value that matches the provided data size and a FW chosen RQ stride size. 
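An illustrative sketch of how a caller might now size the hairpin queues (not from the patch; the create call's signature is assumed from the existing transobj API, and the numeric values are arbitrary):

#include <linux/mlx5/transobj.h>

static struct mlx5_hairpin *
example_create_hairpin(struct mlx5_core_dev *func_mdev,
		       struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,	/* packet buffer data size, as before */
		.log_num_packets = 10,	/* new: hairpin queue size in packets */
		.num_channels    = 4,	/* (RQ, SQ) pairs, from the previous patch */
	};

	/* assumed signature of the existing create helper */
	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}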
Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 8 ++++++-- include/linux/mlx5/transobj.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index acd829d8613b..199bfcd2f2ce 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1031,7 +1031,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_hairpin_queues[0x5]; u8 reserved_at_3c8[0x3]; u8 log_max_hairpin_wq_data_sz[0x5]; - u8 reserved_at_3d0[0xb]; + u8 reserved_at_3d0[0x3]; + u8 log_max_hairpin_num_packets[0x5]; + u8 reserved_at_3d8[0x3]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; @@ -1172,7 +1174,9 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0xb]; + u8 reserved_at_120[0x3]; + u8 log_hairpin_num_packets[0x5]; + u8 reserved_at_128[0x3]; u8 log_hairpin_data_sz[0x5]; u8 reserved_at_130[0x5]; diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 1bcd8d5562f0..7e8f281f8c00 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -77,6 +77,7 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); struct mlx5_hairpin_params { u8 log_data_size; + u8 log_num_packets; u16 q_counter; int num_channels; }; -- cgit v1.2.3 From babe2dbb28e780177ae2fba6a5640be1712f4af0 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 18 Jan 2018 13:31:38 +0100 Subject: device property: Introduce fwnode_get_mac_address() Until now there were two almost identical functions for obtaining MAC address - of_get_mac_address() and, more generic, device_get_mac_address(). However it is not uncommon, that the network interface is represented as a child of the actual controller, hence it is not associated directly to any struct device, required by the latter routine. This commit allows for getting the MAC address for children nodes in the ACPI world by introducing a new function - fwnode_get_mac_address(). This commit also changes device_get_mac_address() routine to be its wrapper, in order to prevent unnecessary duplication. Signed-off-by: Marcin Wojtas Acked-by: Rafael J. Wysocki Signed-off-by: David S. Miller --- include/linux/property.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index f6189a3ac63c..35620e023e20 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -279,6 +279,8 @@ int device_get_phy_mode(struct device *dev); void *device_get_mac_address(struct device *dev, char *addr, int alen); +void *fwnode_get_mac_address(struct fwnode_handle *fwnode, + char *addr, int alen); struct fwnode_handle *fwnode_graph_get_next_endpoint( const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * -- cgit v1.2.3 From b28f263b86709a1e26d7207112030e970abf4aab Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 18 Jan 2018 13:31:39 +0100 Subject: device property: Introduce fwnode_get_phy_mode() Until now there were two almost identical functions for obtaining network PHY mode - of_get_phy_mode() and, more generic, device_get_phy_mode(). However it is not uncommon, that the network interface is represented as a child of the actual controller, hence it is not associated directly to any struct device, required by the latter routine. 
This commit allows for getting the PHY mode for children nodes in the ACPI world by introducing a new function - fwnode_get_phy_mode(). This commit also changes device_get_phy_mode() routine to be its wrapper, in order to prevent unnecessary duplication. Signed-off-by: Marcin Wojtas Acked-by: Rafael J. Wysocki Signed-off-by: David S. Miller --- include/linux/property.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index 35620e023e20..9b133320072f 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -279,6 +279,7 @@ int device_get_phy_mode(struct device *dev); void *device_get_mac_address(struct device *dev, char *addr, int alen); +int fwnode_get_phy_mode(struct fwnode_handle *fwnode); void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen); struct fwnode_handle *fwnode_graph_get_next_endpoint( -- cgit v1.2.3 From 7c6c57f2ab2c5113844fe187a7c45c4bd76dc671 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 18 Jan 2018 13:31:40 +0100 Subject: device property: Introduce fwnode_irq_get() Until now there were two very similar functions allowing to get Linux IRQ number from ACPI handle (acpi_irq_get()) and OF node (of_irq_get()). The first one appeared to be used only as a subroutine of platform_irq_get(), which (in the generic code) limited IRQ obtaining from _CRS method only to nodes associated to kernel's struct platform_device. This patch introduces a new helper routine - fwnode_irq_get(), which allows to get the IRQ number directly from the fwnode to be used as common for OF/ACPI worlds. It is usable not only for the parents fwnodes, but also for the child nodes comprising their own _CRS methods with interrupts description. In order to be able o satisfy compilation with !CONFIG_ACPI and also simplify the new code, introduce a helper macro (ACPI_HANDLE_FWNODE), with which it is possible to reach an ACPI handle directly from its fwnode. Signed-off-by: Marcin Wojtas Signed-off-by: David S. Miller --- include/linux/acpi.h | 3 +++ include/linux/property.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index dc1ebfeeb5ec..f05b9b6cd43f 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? 
\ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +#define ACPI_HANDLE_FWNODE(fwnode) \ + acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { @@ -626,6 +628,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count) #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +#define ACPI_HANDLE_FWNODE(fwnode) (NULL) #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), struct fwnode_handle; diff --git a/include/linux/property.h b/include/linux/property.h index 9b133320072f..e05889fdbb14 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -103,6 +103,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); +int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); + unsigned int device_get_child_node_count(struct device *dev); static inline bool device_property_read_bool(struct device *dev, -- cgit v1.2.3 From 3395de96ae5998692bd86024d0d5e4dd55cd6cc3 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 18 Jan 2018 13:31:41 +0100 Subject: device property: Allow iterating over available child fwnodes Implement a new helper function fwnode_get_next_available_child_node(), which enables obtaining next enabled child fwnode, which works on a similar basis to OF's of_get_next_available_child(). This commit also introduces a macro, thanks to which it is possible to iterate over the available fwnodes, using the new function described above. Signed-off-by: Marcin Wojtas Signed-off-by: David S. Miller --- include/linux/property.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index e05889fdbb14..5b0563ad79a5 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent( struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_child_node( const struct fwnode_handle *fwnode, struct fwnode_handle *child); +struct fwnode_handle *fwnode_get_next_available_child_node( + const struct fwnode_handle *fwnode, struct fwnode_handle *child); #define fwnode_for_each_child_node(fwnode, child) \ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ child = fwnode_get_next_child_node(fwnode, child)) +#define fwnode_for_each_available_child_node(fwnode, child) \ + for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ + child = fwnode_get_next_available_child_node(fwnode, child)) + struct fwnode_handle *device_get_next_child_node( struct device *dev, struct fwnode_handle *child); -- cgit v1.2.3 From b2d3bcfa26a7a8de41f358a6cae8b848673b3c6e Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Thu, 18 Jan 2018 09:59:13 -0800 Subject: net: core: Expose number of link up/down transitions Expose the number of times the link has been going UP or DOWN, and update the "carrier_changes" counter to be the sum of these two events. While at it, also update the sysfs-class-net documentation to cover: carrier_changes (3.15), carrier_up_count (4.16) and carrier_down_count (4.16) Signed-off-by: David Decotigny [Florian: * rebase * add documentation * merge carrier_changes with up/down counters] Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ed0799a12bf2..837e9cb7e358 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1680,8 +1680,6 @@ struct net_device { unsigned long base_addr; int irq; - atomic_t carrier_changes; - /* * Some hardware also needs these fields (state,dev_list, * napi_list,unreg_list,close_list) but they are not @@ -1719,6 +1717,10 @@ struct net_device { atomic_long_t tx_dropped; atomic_long_t rx_nohandler; + /* Stats to monitor link on/off, flapping */ + atomic_t carrier_up_count; + atomic_t carrier_down_count; + #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; -- cgit v1.2.3 From 3a19cfcce105e481b02b14265c5b40c6c10ef60a Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Mon, 22 Jan 2018 18:56:32 +0100 Subject: serdev: add method to set parity Adds serdev_device_set_parity() and an implementation for ttyport. The interface uses an enum with the values SERIAL_PARITY_NONE, SERIAL_PARITY_EVEN and SERIAL_PARITY_ODD. Signed-off-by: Ulrich Hecht Reviewed-by: Sebastian Reichel Reviewed-by: Johan Hovold Acked-by: Greg Kroah-Hartman Signed-off-by: Marcel Holtmann --- include/linux/serdev.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/serdev.h b/include/linux/serdev.h index d609e6dc5bad..a73d87b483b4 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -76,6 +76,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device return container_of(d, struct serdev_device_driver, driver); } +enum serdev_parity { + SERDEV_PARITY_NONE, + SERDEV_PARITY_EVEN, + SERDEV_PARITY_ODD, +}; + /* * serdev controller structures */ @@ -86,6 +92,7 @@ struct serdev_controller_ops { int (*open)(struct serdev_controller *); void (*close)(struct serdev_controller *); void (*set_flow_control)(struct serdev_controller *, bool); + int (*set_parity)(struct serdev_controller *, enum serdev_parity); unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); void (*wait_until_sent)(struct serdev_controller *, long); int (*get_tiocm)(struct serdev_controller *); @@ -298,6 +305,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); } +int serdev_device_set_parity(struct serdev_device *serdev, + enum serdev_parity parity); + /* * serdev hooks into TTY core */ -- cgit v1.2.3 From 9e55e5d30f6c6ebaa1ada11814bc067a4fe3cfd9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 22 Jan 2018 19:14:25 -0800 Subject: net: core: Fix kernel-doc for carrier_* attributes Fix the documentation warning: include/linux/netdevice.h:1939: warning: Excess struct member 'carrier_changes' description in 'net_device' Reported-by: kbuild test robot Fixes: b2d3bcfa26a7 ("net: core: Expose number of link up/down transitions") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 837e9cb7e358..581495f4e487 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1469,8 +1469,6 @@ enum netdev_priv_flags { * @base_addr: Device I/O address * @irq: Device IRQ number * - * @carrier_changes: Stats to monitor carrier on<->off transitions - * * @state: Generic network queuing layer state, see netdev_state_t * @dev_list: The global list of network devices * @napi_list: List entry used for polling NAPI devices @@ -1506,6 +1504,8 @@ enum netdev_priv_flags { * do not use this in drivers * @rx_nohandler: nohandler dropped packets by core network on * inactive devices, do not use this in drivers + * @carrier_up_count: Number of times the carrier has been up + * @carrier_down_count: Number of times the carrier has been down * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, -- cgit v1.2.3 From 36fd633ec98acd2028585c22128fcaa3da6d5770 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 26 Jun 2017 13:19:16 -0400 Subject: net: separate SIOCGIFCONF handling from dev_ioctl() Only two of dev_ioctl() callers may pass SIOCGIFCONF to it. Separating that codepath from the rest of dev_ioctl() allows both to simplify dev_ioctl() itself (all other cases work with struct ifreq *) *and* seriously simplify the compat side of that beast: all it takes is passing to inet_gifconf() an extra argument - the size of individual records (sizeof(struct ifreq) or sizeof(struct compat_ifreq)). With dev_ifconf() called directly from sock_do_ioctl()/compat_dev_ifconf() that's easy to arrange. As the result, compat side of SIOCGIFCONF doesn't need any allocations, copy_in_user() back and forth, etc. 
Reviewed-by: Christoph Hellwig Signed-off-by: Al Viro --- include/linux/netdevice.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 581495f4e487..df5565d0369c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2761,7 +2761,8 @@ static inline bool dev_validate_header(const struct net_device *dev, return false; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); +typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, + int len, int size); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) { @@ -3315,6 +3316,7 @@ void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *, unsigned int flags); -- cgit v1.2.3 From 03aef17bb79b3dc02b1352ee2f55fca799dbad7f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 1 Jul 2017 07:53:12 -0400 Subject: devinet_ioctl(): take copyin/copyout to caller Reviewed-by: Christoph Hellwig Signed-off-by: Al Viro --- include/linux/inetdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 1ac5bf95bfdd..e16fe7d44a71 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); -int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); +int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); -- cgit v1.2.3 From 44c02a2c3dc55835e9f0d8ef73966406cd805001 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 5 Oct 2017 12:59:44 -0400 Subject: dev_ioctl(): move copyin/copyout to callers Signed-off-by: Al Viro --- include/linux/netdevice.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index df5565d0369c..24a62d590350 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3315,7 +3315,8 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); -int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, + bool *need_copyout); int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); -- cgit v1.2.3 From 5c59e564e46dcbab2ee7a4e9e0243562a39679a2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 1 Jul 2017 18:46:30 -0400 Subject: kill kernel_sock_ioctl() no users since 2014 Reviewed-by: Christoph Hellwig Signed-off-by: Al Viro --- include/linux/net.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/net.h b/include/linux/net.h index 
caeb159abda5..68acc54976bf 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); -int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); /* Routine returns the IP overhead imposed by a (caller-protected) socket. */ -- cgit v1.2.3 From b73042b8a28e2603ac178295ab96c876ba5a97a1 Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Thu, 25 Jan 2018 16:14:08 -0800 Subject: bpf: Add write access to tcp_sock and sock fields This patch adds a macro, SOCK_OPS_SET_FIELD, for writing to struct tcp_sock or struct sock fields. This required adding a new field "temp" to struct bpf_sock_ops_kern for temporary storage that is used by sock_ops_convert_ctx_access. It is used to store and recover the contents of a register, so the register can be used to store the address of the sk. Since we cannot overwrite the dst_reg because it contains the pointer to ctx, nor the src_reg since it contains the value we want to store, we need an extra register to contain the address of the sk. Also adds the macro SOCK_OPS_GET_OR_SET_FIELD that calls one of the GET or SET macros depending on the value of the TYPE field. Signed-off-by: Lawrence Brakmo Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 425056c7f96c..daa5a676335f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1007,6 +1007,15 @@ struct bpf_sock_ops_kern { u32 replylong[4]; }; u32 is_fullsock; + u64 temp; /* temp and everything after is not + * initialized to 0 before calling + * the BPF program. New fields that + * should be initialized to 0 should + * be inserted before temp. + * temp is scratch storage used by + * sock_ops_convert_ctx_access + * as temporary storage of a register. + */ }; #endif /* __LINUX_FILTER_H__ */ -- cgit v1.2.3 From de525be2ca2734865d29c4b67ddd29913b214906 Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Thu, 25 Jan 2018 16:14:09 -0800 Subject: bpf: Support passing args to sock_ops bpf function Adds support for passing up to 4 arguments to sock_ops bpf functions. It reusues the reply union, so the bpf_sock_ops structures are not increased in size. Signed-off-by: Lawrence Brakmo Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index daa5a676335f..20384c4bed25 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1003,6 +1003,7 @@ struct bpf_sock_ops_kern { struct sock *sk; u32 op; union { + u32 args[4]; u32 reply; u32 replylong[4]; }; -- cgit v1.2.3 From b13d880721729384757f235166068c315326f4a1 Mon Sep 17 00:00:00 2001 From: Lawrence Brakmo Date: Thu, 25 Jan 2018 16:14:10 -0800 Subject: bpf: Adds field bpf_sock_ops_cb_flags to tcp_sock Adds field bpf_sock_ops_cb_flags to tcp_sock and bpf_sock_ops. Its primary use is to determine if there should be calls to sock_ops bpf program at various points in the TCP code. The field is initialized to zero, disabling the calls. A sock_ops BPF program can set it, per connection and as necessary, when the connection is established. 
It also adds support for reading and writting the field within a sock_ops BPF program. Reading is done by accessing the field directly. However, writing is done through the helper function bpf_sock_ops_cb_flags_set, in order to return an error if a BPF program is trying to set a callback that is not supported in the current kernel (i.e. running an older kernel). The helper function returns 0 if it was able to set all of the bits set in the argument, a positive number containing the bits that could not be set, or -EINVAL if the socket is not a full TCP socket. Examples of where one could call the bpf program: 1) When RTO fires 2) When a packet is retransmitted 3) When the connection terminates 4) When a packet is sent 5) When a packet is received Signed-off-by: Lawrence Brakmo Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/tcp.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4f93f0953c41..8f4c54986f97 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -335,6 +335,17 @@ struct tcp_sock { int linger2; + +/* Sock_ops bpf program related variables */ +#ifdef CONFIG_BPF + u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs + * values defined in uapi/linux/tcp.h + */ +#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG) +#else +#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 +#endif + /* Receiver side RTT estimation */ struct { u32 rtt_us; -- cgit v1.2.3 From 5e581dad4fec0e6d062740dc35b8dc248b39d224 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 26 Jan 2018 23:33:38 +0100 Subject: bpf: make unknown opcode handling more robust Recent findings by syzcaller fixed in 7891a87efc71 ("bpf: arsh is not supported in 32 bit alu thus reject it") triggered a warning in the interpreter due to unknown opcode not being rejected by the verifier. The 'return 0' for an unknown opcode is really not optimal, since with BPF to BPF calls, this would go untracked by the verifier. Do two things here to improve the situation: i) perform basic insn sanity check early on in the verification phase and reject every non-uapi insn right there. The bpf_opcode_in_insntable() table reuses the same mapping as the jumptable in ___bpf_prog_run() sans the non-public mappings. And ii) in ___bpf_prog_run() we do need to BUG in the case where the verifier would ever create an unknown opcode due to some rewrites. Note that JITs do not have such issues since they would punt to interpreter in these situations. Moreover, the BPF_JIT_ALWAYS_ON would also help to avoid such unknown opcodes in the first place. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 20384c4bed25..276932d75975 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -688,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb) struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); +bool bpf_opcode_in_insntable(u8 code); + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); -- cgit v1.2.3 From 406de7555424d93849166684d0bd172743d2a30c Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Fri, 26 Jan 2018 01:36:27 +0200 Subject: ptr_ring: keep consumer_head valid at all times The comment near __ptr_ring_peek says: * If ring is never resized, and if the pointer is merely * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. but this was in fact never possible since consumer_head would sometimes point outside the ring. Refactor the code so that it's always pointing within a ring. Fixes: c5ad119fb6c09 ("net: sched: pfifo_fast use skb_array") Signed-off-by: Michael S. Tsirkin Acked-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/ptr_ring.h | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 9ca1726ff963..5ebcdd40df99 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -248,22 +248,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) /* Fundamentally, what we want to do is update consumer * index and zero out the entry so producer can reuse it. * Doing it naively at each consume would be as simple as: - * r->queue[r->consumer++] = NULL; - * if (unlikely(r->consumer >= r->size)) - * r->consumer = 0; + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; * but that is suboptimal when the ring is full as producer is writing * out new entries in the same cache line. Defer these updates until a * batch of entries has been consumed. */ - int head = r->consumer_head++; + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; /* Once we have processed enough entries invalidate them in * the ring all at once so producer can reuse their space in the ring. * We also do this when we reach end of the ring - not mandatory * but helps keep the implementation simple. */ - if (unlikely(r->consumer_head - r->consumer_tail >= r->batch || - r->consumer_head >= r->size)) { + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { /* Zero out entries in the reverse order: this way we touch the * cache line that producer might currently be reading the last; * producer won't make progress and touch other cache lines @@ -271,12 +277,13 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) */ while (likely(head >= r->consumer_tail)) r->queue[head--] = NULL; - r->consumer_tail = r->consumer_head; + r->consumer_tail = consumer_head; } - if (unlikely(r->consumer_head >= r->size)) { - r->consumer_head = 0; + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; r->consumer_tail = 0; } + r->consumer_head = consumer_head; } static inline void *__ptr_ring_consume(struct ptr_ring *r) -- cgit v1.2.3 From 8619d384eb5e9256a9d4ebe96c1832ac9b9049d6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:28 +0200 Subject: ptr_ring: clean up documentation The only function safe to call without locks is __ptr_ring_empty. Move documentation about lockless use there to make sure people do not try to use __ptr_ring_peek outside locks. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- include/linux/ptr_ring.h | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 5ebcdd40df99..8594c7b37c15 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -169,21 +169,6 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) return ret; } -/* Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). Callers must take consumer_lock - * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. - * If ring is never resized, and if the pointer is merely - * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. - * However, if called outside the lock, and if some other CPU - * consumes ring entries at the same time, the value returned - * is not guaranteed to be correct. - * In this case - to avoid incorrectly detecting the ring - * as empty - the CPU consuming the ring entries is responsible - * for either consuming all ring entries until the ring is empty, - * or synchronizing with some other CPU and causing it to - * execute __ptr_ring_peek and/or consume the ring enteries - * after the synchronization point. - */ static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) @@ -191,7 +176,24 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r) return NULL; } -/* See __ptr_ring_peek above for locking rules. */ +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. + * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring enteries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + */ static inline bool __ptr_ring_empty(struct ptr_ring *r) { return !__ptr_ring_peek(r); -- cgit v1.2.3 From a259df36d1fbf25f0e4f649fdb84e4527e5640ed Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:29 +0200 Subject: ptr_ring: READ/WRITE_ONCE for __ptr_ring_empty Lockless __ptr_ring_empty requires that consumer head is read and written at once, atomically. Annotate accordingly to make sure compiler does it correctly. Switch locked callers to __ptr_ring_peek which does not support the lockless operation. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. 
Miller --- include/linux/ptr_ring.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 8594c7b37c15..9a72d8fec50e 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -196,7 +196,9 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r) */ static inline bool __ptr_ring_empty(struct ptr_ring *r) { - return !__ptr_ring_peek(r); + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; } static inline bool ptr_ring_empty(struct ptr_ring *r) @@ -285,7 +287,8 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) consumer_head = 0; r->consumer_tail = 0; } - r->consumer_head = consumer_head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); } static inline void *__ptr_ring_consume(struct ptr_ring *r) @@ -541,7 +544,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, goto done; } r->queue[head] = batch[--n]; - r->consumer_tail = r->consumer_head = head; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); } done: -- cgit v1.2.3 From 84328342a70a44379dd73011a44c5f5e00481a42 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:32 +0200 Subject: ptr_ring: disallow lockless __ptr_ring_full Similar to bcecb4bbf88a ("net: ptr_ring: otherwise safe empty checks can overrun array bounds") a lockless use of __ptr_ring_full might cause an out of bounds access. We can fix this, but it's easier to just disallow lockless __ptr_ring_full for now. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- include/linux/ptr_ring.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 9a72d8fec50e..f17584680b79 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -45,9 +45,10 @@ struct ptr_ring { }; /* Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). If ring is ever resized, callers must hold - * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold - * producer_lock, the next call to __ptr_ring_produce may fail. + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. */ static inline bool __ptr_ring_full(struct ptr_ring *r) { -- cgit v1.2.3 From 9fb582b67072bea6cbfe1aefc2be13c62c7681bf Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:35 +0200 Subject: Revert "net: ptr_ring: otherwise safe empty checks can overrun array bounds" This reverts commit bcecb4bbf88aa03171c30652bca761cf27755a6b. If we try to allocate an extra entry as the above commit did, and when the requested size is UINT_MAX, addition overflows causing zero size to be passed to kmalloc(). kmalloc then returns ZERO_SIZE_PTR with a subsequent crash. Reported-by: syzbot+87678bcf753b44c39b67@syzkaller.appspotmail.com Cc: John Fastabend Signed-off-by: Michael S. Tsirkin Acked-by: John Fastabend Signed-off-by: David S. 
Miller --- include/linux/ptr_ring.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index f17584680b79..3a19ebdcef14 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -466,12 +466,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - /* Allocate an extra dummy element at end of ring to avoid consumer head - * or produce head access past the end of the array. Possible when - * producer/consumer operations and __ptr_ring_peek operations run in - * parallel. - */ - return kcalloc(size + 1, sizeof(void *), gfp); + return kcalloc(size, sizeof(void *), gfp); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) -- cgit v1.2.3 From f417dc28185d7fdce567821862fcbf9222058cb7 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:36 +0200 Subject: skb_array: use __ptr_ring_empty __skb_array_empty should use __ptr_ring_empty since that's the only legal lockless function. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- include/linux/skb_array.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index c7addf37d119..a6b6e8bb3d7b 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -69,7 +69,7 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb */ static inline bool __skb_array_empty(struct skb_array *a) { - return !__ptr_ring_peek(&a->ring); + return __ptr_ring_empty(&a->ring); } static inline struct sk_buff *__skb_array_peek(struct skb_array *a) -- cgit v1.2.3 From a07d29c6724a19eab120b7a74a9bfd107d20f69a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 26 Jan 2018 01:36:38 +0200 Subject: ptr_ring: prevent queue load/store tearing In theory compiler could tear queue loads or stores in two. It does not seem to be happening in practice but it seems easier to convert the cases where this would be a problem to READ/WRITE_ONCE than worry about it. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- include/linux/ptr_ring.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 3a19ebdcef14..1883d6137e9b 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -114,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ smp_wmb(); - r->queue[r->producer++] = ptr; + WRITE_ONCE(r->queue[r->producer++], ptr); if (unlikely(r->producer >= r->size)) r->producer = 0; return 0; @@ -173,7 +173,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) - return r->queue[r->consumer_head]; + return READ_ONCE(r->queue[r->consumer_head]); return NULL; } -- cgit v1.2.3 From 38e01b30563a5b5ade7b54e5d739d16a2b02fe82 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 25 Jan 2018 15:01:39 +0100 Subject: dev: advertise the new ifindex when the netns iface changes The goal is to let the user follow an interface that moves to another netns. CC: Jiri Benc CC: Christian Brauner Signed-off-by: Nicolas Dichtel Reviewed-by: Jiri Benc Signed-off-by: David S. 
Miller --- include/linux/rtnetlink.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 62d508b31f56..0514cc36ac34 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, - gfp_t flags, int *new_nsid); + gfp_t flags, int *new_nsid, int new_ifindex); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, unsigned change, u32 event, - gfp_t flags, int *new_nsid); + gfp_t flags, int *new_nsid, + int new_ifindex); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); -- cgit v1.2.3 From 6a643ddb5624be7e0694d49f5765a8d41c1ab6d0 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 25 Jan 2018 18:26:22 -0800 Subject: net: introduce helper dev_change_tx_queue_len() This patch promotes the local change_tx_queue_len() to a core helper function, dev_change_tx_queue_len(), so that rtnetlink and net-sysfs could share the code. This also prepares for the following patch. Note, the -EFAULT in the original code doesn't make sense, we should propagate the errno from notifiers. Cc: John Fastabend Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cd46d3d63aa0..4c77f39ebd65 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3331,6 +3331,7 @@ int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); int dev_set_mtu(struct net_device *, int); +int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); -- cgit v1.2.3
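A minimal sketch of how the bpf_sock_ops_cb_flags machinery from the tcp.h change earlier in this log is meant to be used on the TCP side: the per-socket test keeps the cost at a single bit test for sockets whose program never asked for the callback. The names BPF_SOCK_OPS_RTO_CB_FLAG, BPF_SOCK_OPS_RTO_CB and tcp_call_bpf() are assumed from the wider series for illustration and are not defined by the hunk shown above.

#include <linux/tcp.h>
#include <net/tcp.h>

/* Hypothetical RTO hook; the flag and op names below are assumptions,
 * not part of the include/linux/tcp.h change above. */
static void example_on_rto(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* With CONFIG_BPF=n the macro folds to 0 and the branch vanishes. */
	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTO_CB);
}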
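For the bpf_opcode_in_insntable() declaration above, a rough sketch of the early sanity pass the commit message describes, reduced to its core loop; the function name and calling context are invented for illustration.

#include <linux/filter.h>
#include <linux/errno.h>

/* Reject any instruction whose opcode is not in the public (uapi)
 * instruction table before doing any further verification. */
static int example_check_opcodes(const struct bpf_insn *insns, u32 len)
{
	u32 i;

	for (i = 0; i < len; i++)
		if (!bpf_opcode_in_insntable(insns[i].code))
			return -EINVAL;

	return 0;
}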
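The first two ptr_ring patches above (keeping consumer_head within the array and documenting __ptr_ring_empty as the only legal lockless call) combine into one usage rule; here is a sketch of the intended split between a locked consumer and a lockless emptiness poll, with the surrounding driver context invented for illustration.

#include <linux/ptr_ring.h>
#include <linux/slab.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Consumer side, under the consumer lock: ptr_ring_consume() ends up in
 * __ptr_ring_discard_one(), which now always leaves consumer_head
 * pointing inside the array. */
static void example_drain(struct ptr_ring *r)
{
	void *entry;

	while ((entry = ptr_ring_consume(r)))
		kfree(entry);
}

/* Lockless side: only __ptr_ring_empty() may be called without locks,
 * and only if the ring is never resized; a compiler barrier such as
 * cpu_relax() is needed when polling in a loop. */
static void example_wait_for_work(struct ptr_ring *r)
{
	while (__ptr_ring_empty(r))
		cpu_relax();
}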
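The READ_ONCE/WRITE_ONCE annotations in the ptr_ring patches above exist because the compiler is otherwise free to reload or tear the plain accesses; a rough before-and-after sketch, simplified from the real functions rather than copied from them:

#include <linux/compiler.h>
#include <linux/ptr_ring.h>

/* Unannotated: the compiler may re-read r->consumer_head or split the
 * load, so a lockless caller can index the queue with a torn value. */
static bool example_empty_unannotated(struct ptr_ring *r)
{
	return r->size ? !r->queue[r->consumer_head] : true;
}

/* Annotated: a single load of the index and of the slot, pairing with
 * the WRITE_ONCE() the consumer now uses to publish consumer_head. */
static bool example_empty_annotated(struct ptr_ring *r)
{
	return r->size ? !READ_ONCE(r->queue[READ_ONCE(r->consumer_head)]) : true;
}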
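The ptr_ring revert above comes down to unsigned wrap-around in the allocation size; a worked example of the failure mode, illustrative rather than the exact kernel call chain:

#include <linux/slab.h>

/* Reverted behaviour: one extra dummy slot past the requested size.
 * With size == UINT_MAX, size + 1 wraps to 0, kcalloc(0, ...) returns
 * ZERO_SIZE_PTR rather than NULL, the "allocation" looks successful,
 * and the first dereference of the queue crashes. */
static void **example_alloc_queue_old(unsigned int size, gfp_t gfp)
{
	return kcalloc(size + 1, sizeof(void *), gfp);
}

/* Restored behaviour: no dummy slot, so an oversized request simply
 * fails the allocation and can be handled as an error. */
static void **example_alloc_queue_new(unsigned int size, gfp_t gfp)
{
	return kcalloc(size, sizeof(void *), gfp);
}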
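For the new dev_change_tx_queue_len() helper declared above, a hypothetical caller in the style of the net-sysfs and rtnetlink users the commit message has in mind; the wrapper name is invented, and the in-tree callers invoke the helper with the RTNL lock held.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative wrapper: both the sysfs store path and rtnetlink can now
 * funnel through the shared helper, and any error a notifier returns is
 * propagated as-is instead of being flattened to -EFAULT. */
static int example_set_tx_queue_len(struct net_device *dev, unsigned long len)
{
	int err;

	rtnl_lock();
	err = dev_change_tx_queue_len(dev, len);
	rtnl_unlock();

	return err;
}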