path: root/include/linux/mlx5/driver.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 07:54:15 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 07:54:15 -0800
commit	ce38aa9cbed3d109355b0169b520362c409c0541 (patch)
tree	621511c34edd22ac30ca12f78f0d478245b4ccd7 /include/linux/mlx5/driver.h
parent	69973b830859bc6529a7a0468ba0d80ee5117826 (diff)
parent	d84701ecbcd6ad63faa7a9c18ad670d1c4d561c0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Platform regulatory domain support for ath10k, from Bartosz Markowski.
 2) Centralize min/max MTU checking, thus removing tons of duplicated code from all of the various drivers. From Jarod Wilson.
 3) Support ingress actions in act_mirred, from Shmulik Ladkani.
 4) Improve device adjacency tracking, from David Ahern.
 5) Add support for LED triggers on PHY link state changes, from Zach Brown.
 6) Improve UDP socket memory accounting, from Paolo Abeni.
 7) Set SK_MEM_QUANTUM to a fixed size of 4096, instead of PAGE_SIZE. From Eric Dumazet.
 8) Collapse TCP SKBs at retransmit time even if the right side SKB has frags. Also from Eric Dumazet.
 9) Add IP_RECVFRAGSIZE and IPV6_RECVFRAGSIZE cmsgs, from Willem de Bruijn.
10) Support routing by UID, from Lorenzo Colitti.
11) Handle L3 domain binding (i.e. VRF) for RAW sockets, from David Ahern.
12) tcp_get_info() can run lockless, from Eric Dumazet.
13) 4-tuple UDP hashing in SFC driver, from Edward Cree.
14) Avoid reorders in GRO code, from Eric Dumazet.
15) IPV6 Segment Routing support, from David Lebrun.
16) Support MPLS push and pop for L3 packets in openvswitch, from Jiri Benc.
17) Add LRU datastructure support for BPF, from Martin KaFai Lau.
18) VF support in liquidio driver, from Raghu Vatsavayi.
19) Multiqueue support in alx driver, from Tobias Regnery.
20) Networking cgroup BPF support, from Daniel Mack.
21) TCP chronograph measurements, from Francis Yan.
22) XDP support for qed driver, from Yuval Mintz.
23) BPF based lwtunnels, from Thomas Graf.
24) Consistent FIB dumping to offloading drivers, from Ido Schimmel.
25) Many optimizations for UDP under high load, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  netfilter: nft_counter: rework atomic dump and reset
  e1000: use disable_hardirq() for e1000_netpoll()
  i40e: don't truncate match_method assignment
  net: ethernet: ti: netcp: add support of cpts
  net: phy: phy drivers should not set SUPPORTED_[Asym_]Pause
  net: l2tp: ppp: change PPPOL2TP_MSG_* => L2TP_MSG_*
  net: l2tp: deprecate PPPOL2TP_MSG_* in favour of L2TP_MSG_*
  net: l2tp: export debug flags to UAPI
  net: ethernet: stmmac: remove private tx queue lock
  net: ethernet: sxgbe: remove private tx queue lock
  net: bridge: shorten ageing time on topology change
  net: bridge: add helper to set topology change
  net: bridge: add helper to offload ageing time
  net: nicvf: use new api ethtool_{get|set}_link_ksettings
  net: ethernet: ti: cpsw: sync rates for channels in dual emac mode
  net: ethernet: ti: cpsw: re-split res only when speed is changed
  net: ethernet: ti: cpsw: combine budget and weight split and check
  net: ethernet: ti: cpsw: don't start queue twice
  net: ethernet: ti: cpsw: use same macros to get active slave
  net: mvneta: select GENERIC_ALLOCATOR
  ...
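As a side note on item 2 above: drivers now advertise a valid MTU range once, and the networking core rejects out-of-range values in dev_set_mtu(), so per-driver range checks in ndo_change_mtu() can be dropped. The sketch below is only an illustration of that pattern; the "foo" driver name and FOO_MAX_FRAME_SIZE limit are hypothetical.

/*
 * Illustrative only: publish the MTU bounds once at setup time.
 * ETH_MIN_MTU (68) comes from <uapi/linux/if_ether.h>; values outside
 * [min_mtu, max_mtu] are refused by the core in dev_set_mtu().
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define FOO_MAX_FRAME_SIZE	9216	/* hypothetical hardware frame limit */

static void foo_set_mtu_range(struct net_device *netdev)
{
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = FOO_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
}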
Diffstat (limited to 'include/linux/mlx5/driver.h')
-rw-r--r--	include/linux/mlx5/driver.h	60
1 file changed, 53 insertions, 7 deletions
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d89ccd..0ae55361e674 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -104,6 +104,8 @@ enum {
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -121,6 +123,12 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+};
+
+enum mlx5_dcbx_oper_mode {
+ MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
+ MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
@@ -208,7 +216,7 @@ struct mlx5_cmd_first {
struct mlx5_cmd_msg {
struct list_head list;
- struct cache_ent *cache;
+ struct cmd_msg_cache *parent;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
@@ -228,17 +236,17 @@ struct mlx5_cmd_debug {
u16 outlen;
};
-struct cache_ent {
+struct cmd_msg_cache {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
+ unsigned int max_inbox_size;
+ unsigned int num_ent;
};
-struct cmd_msg_cache {
- struct cache_ent large;
- struct cache_ent med;
-
+enum {
+ MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
@@ -281,7 +289,7 @@ struct mlx5_cmd {
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
- struct cmd_msg_cache cache;
+ struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
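For context on the cmd_msg_cache rework above: instead of the old fixed "med"/"large" pools, the command interface now keeps an array of MLX5_NUM_COMMAND_CACHES pools, each advertising the largest inbox it can serve via max_inbox_size. A rough sketch of the lookup this layout enables follows; the function name is hypothetical and the real logic lives in drivers/net/ethernet/mellanox/mlx5/core/cmd.c, so details may differ.

/*
 * Hypothetical sketch: pick the smallest cache whose max_inbox_size can
 * hold in_size and reuse a cached message if one is on the free list.
 * Caches are assumed to be ordered by ascending max_inbox_size.
 */
#include <linux/mlx5/driver.h>

static struct mlx5_cmd_msg *example_pick_cached_msg(struct mlx5_cmd *cmd,
						     int in_size)
{
	struct mlx5_cmd_msg *msg = NULL;
	struct cmd_msg_cache *ch;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (!list_empty(&ch->head)) {
			msg = list_entry(ch->head.next,
					 struct mlx5_cmd_msg, list);
			list_del(&msg->list);
			/* msg->parent already points back at this cache,
			 * so freeing the message can return it here.
			 */
		}
		spin_unlock_irq(&ch->lock);
		break;
	}
	return msg;	/* NULL means fall back to a fresh allocation */
}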
@@ -310,6 +318,13 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_frag_buf {
+ struct mlx5_buf_list *frags;
+ int npages;
+ int size;
+ u8 page_shift;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -498,6 +513,31 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_NUM = 0x3,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_port_module_event_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
@@ -559,6 +599,8 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+
+ struct mlx5_port_module_event_stats pme_stats;
};
enum mlx5_device_state {
@@ -787,6 +829,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -831,6 +876,7 @@ void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
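Finally, a minimal usage sketch for the new fragmented-buffer helpers declared in this diff (mlx5_frag_buf_alloc_node(), mlx5_fill_page_frag_array(), mlx5_frag_buf_free()). Only the helper signatures come from the header above; the surrounding function, the caller-supplied PAS array, and the simplified error handling are hypothetical.

#include <linux/mlx5/driver.h>

/*
 * Hypothetical caller: allocate a fragmented buffer on a given NUMA node,
 * publish its pages into a PAS array sized for buf.npages entries, then
 * release the buffer again.
 */
static int example_use_frag_buf(struct mlx5_core_dev *dev, int size,
				int node, __be64 *pas)
{
	struct mlx5_frag_buf buf;
	int err;

	err = mlx5_frag_buf_alloc_node(dev, size, &buf, node);
	if (err)
		return err;

	/* translate the fragments into page addresses for a HW context */
	mlx5_fill_page_frag_array(&buf, pas);

	/* ... hand pas/buf.npages to the relevant create command ... */

	mlx5_frag_buf_free(dev, &buf);
	return 0;
}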