From 7b13558f242747db90e52fdc510441a2ed0bafba Mon Sep 17 00:00:00 2001
From: Max Gurtovoy
Date: Mon, 2 Jan 2017 11:37:38 +0200
Subject: net/mlx5: Fix offset naming for reserved fields in hca_cap_bits

Fix the offsets encoded in the names of the reserved fields.

Fixes: 7486216b3a0b ("{net,IB}/mlx5: mlx5_ifc updates")
Fixes: b4ff3a36d3e4 ("net/mlx5: Use offset based reserved field names in the IFC header file")
Fixes: 7d5e14237a55 ("net/mlx5: Update mlx5_ifc hardware features")
Signed-off-by: Max Gurtovoy
Reviewed-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/mlx5_ifc.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 57bec544e20a..4792c856531e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -826,9 +826,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_1a9[0x2];
 	u8         local_ca_ack_delay[0x5];
 	u8         port_module_event[0x1];
-	u8         reserved_at_1b0[0x1];
+	u8         reserved_at_1b1[0x1];
 	u8         ports_check[0x1];
-	u8         reserved_at_1b2[0x1];
+	u8         reserved_at_1b3[0x1];
 	u8         disable_link_up[0x1];
 	u8         beacon_led[0x1];
 	u8         port_type[0x2];
@@ -858,7 +858,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {

 	u8         compact_address_vector[0x1];
 	u8         striding_rq[0x1];
-	u8         reserved_at_201[0x2];
+	u8         reserved_at_202[0x2];
 	u8         ipoib_basic_offloads[0x1];
 	u8         reserved_at_205[0xa];
 	u8         drain_sigerr[0x1];
@@ -1009,10 +1009,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         rndv_offload_rc[0x1];
 	u8         rndv_offload_dc[0x1];
 	u8         log_tag_matching_list_sz[0x5];
-	u8         reserved_at_5e8[0x3];
+	u8         reserved_at_5f8[0x3];
 	u8         log_max_xrq[0x5];

-	u8         reserved_at_5f0[0x200];
+	u8         reserved_at_600[0x200];
 };

 enum mlx5_flow_destination_type {
--
cgit v1.2.3


From bcda1aca7712539439d53dd5351c6223e03bf6e1 Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:41 +0200
Subject: net/mlx5: Support new MR features

This patch adds the following items to the IFC file:

1. The MLX5_MKC_ACCESS_MODE_KSM enum value for creating KSM memory
   keys. The KSM access mode is used when an indirect MKey is
   associated with fixed-size memory entries.
2. The null_mkey field, which is used to indicate non-present KLM/KSM
   entries; accessing such an entry causes the device to generate a
   page fault event.
3. struct mlx5_ifc_cmd_hca_cap_bits capability bits indicating that the
   related value/field is supported:
   * fixed_buffer_size - MLX5_MKC_ACCESS_MODE_KSM
   * umr_extended_translation_offset - translation_offset_42_16 in the
     UMR ctrl segment
   * null_mkey - null_mkey in QUERY_SPECIAL_CONTEXTS

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/mlx5_ifc.h | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4792c856531e..7c760e580268 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -782,11 +782,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         log_max_eq[0x4];

 	u8         max_indirection[0x8];
-	u8         reserved_at_108[0x1];
+	u8         fixed_buffer_size[0x1];
 	u8         log_max_mrw_sz[0x7];
 	u8         reserved_at_110[0x2];
 	u8         log_max_bsf_list_size[0x6];
-	u8         reserved_at_118[0x2];
+	u8         umr_extended_translation_offset[0x1];
+	u8         null_mkey[0x1];
 	u8         log_max_klm_list_size[0x6];

 	u8         reserved_at_120[0xa];
@@ -2569,6 +2570,7 @@ enum {
 	MLX5_MKC_ACCESS_MODE_PA    = 0x0,
 	MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
 	MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+	MLX5_MKC_ACCESS_MODE_KSM   = 0x3,
 };

 struct mlx5_ifc_mkc_bits {
@@ -3677,6 +3679,10 @@ struct mlx5_ifc_query_special_contexts_out_bits {
 	u8         dump_fill_mkey[0x20];

 	u8         resd_lkey[0x20];
+
+	u8         null_mkey[0x20];
+
+	u8         reserved_at_a0[0x60];
 };

 struct mlx5_ifc_query_special_contexts_in_bits {
--
cgit v1.2.3
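As a quick illustration of how the new null_mkey output field can be
consumed, here is a sketch following the usual MLX5_SET/MLX5_GET/
mlx5_cmd_exec conventions; the helper name is an assumption, not part
of this patch.

/* Sketch only: query the null MKey via QUERY_SPECIAL_CONTEXTS. The
 * command and field names are the ones introduced in the IFC diff
 * above; the helper itself is hypothetical.
 */
static int query_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}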
From 3161625589c1d7c54e949d462f4d0c327664881a Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:42 +0200
Subject: IB/mlx5: Refactor UMR post send format

* Update struct mlx5_wqe_umr_ctrl_seg.
* Currently, the UMR send_flags cover only certain use cases: enabling
  and disabling cached MRs, and modifying the XLT for ODP. Making the
  flags independent makes UMR more flexible and allows arbitrary
  manipulations.
* Since different UMR formats have different entry sizes, a UMR request
  should receive the exact size of the translation table update rather
  than the number of entries. Rename the npages field to xlt_size in
  struct mlx5_umr_wr and update the relevant code accordingly.
* Add support for the length64 bit.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/qp.h | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 0aacb2a7480d..693811e0cb24 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -292,10 +292,14 @@ struct mlx5_wqe_data_seg {
 struct mlx5_wqe_umr_ctrl_seg {
 	u8		flags;
 	u8		rsvd0[3];
-	__be16		klm_octowords;
-	__be16		bsf_octowords;
+	__be16		xlt_octowords;
+	union {
+		__be16	xlt_offset;
+		__be16	bsf_octowords;
+	};
 	__be64		mkey_mask;
-	u8		rsvd1[32];
+	__be32		xlt_offset_47_16;
+	u8		rsvd1[28];
 };

 struct mlx5_seg_set_psv {
@@ -389,6 +393,10 @@ struct mlx5_bsf {
 	struct mlx5_bsf_inl	m_inl;
 };

+struct mlx5_mtt {
+	__be64		ptag;
+};
+
 struct mlx5_klm {
 	__be32		bcount;
 	__be32		key;
--
cgit v1.2.3
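To make the reworked layout concrete, here is a hedged sketch of
filling the control segment for an XLT update; the helper and its
calling convention are illustrative assumptions, while the 16/32-bit
split of the 48-bit offset follows directly from the fields above.

/* Sketch only: populate the reworked UMR ctrl segment for an XLT
 * update of `size` bytes at `offset` (expressed in 16-byte octowords).
 */
static void set_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			     size_t size, u64 offset, u64 mkey_mask)
{
	memset(umr, 0, sizeof(*umr));
	umr->xlt_octowords = cpu_to_be16(size / 16);
	umr->xlt_offset = cpu_to_be16(offset & 0xffff);    /* bits 15:0 */
	umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); /* bits 47:16 */
	umr->mkey_mask = cpu_to_be64(mkey_mask);
}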
From 7d0cc6edcc7011133c45f62a7796a98b8cb5da0f Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:44 +0200
Subject: IB/mlx5: Add MR cache for large UMR regions

In this change we turn mlx5_ib_update_mtt() into the generic
mlx5_ib_update_xlt(), which performs HCA translation table
modifications, supports both atomic and process contexts, and is not
limited by the number of modified entries.

Using this function we increase the preallocated MRs up to 16GB.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/driver.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0ae55361e674..ec52f3b50bf5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -959,7 +959,7 @@ enum {
 };

 enum {
-	MAX_MR_CACHE_ENTRIES	= 16,
+	MAX_MR_CACHE_ENTRIES	= 21,
 };

 enum {
--
cgit v1.2.3
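A back-of-the-envelope check of the 16GB figure, under the assumption
(not stated in this patch) that cache entry i holds MKeys of order
i + 2 and that pages are 4KB:

/* Sketch only, assuming cache entry i holds MKeys of order i + 2:
 * the largest entry (i = 20) then covers 2^22 pages of 4KB, i.e. 16GB.
 */
#include <stdio.h>

#define MAX_MR_CACHE_ENTRIES	21

int main(void)
{
	unsigned long long pages = 1ULL << (MAX_MR_CACHE_ENTRIES - 1 + 2);

	printf("largest cached MR: %llu GB\n", (pages * 4096) >> 30);
	return 0;
}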
From 223cdc72429079aaf72511d2677b5d6584866313 Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:45 +0200
Subject: net/mlx5: Update PAGE_FAULT_RESUME layout

Update the PAGE_FAULT_RESUME command layout.

The three bit fields describing the page fault (rdma, read_write and
req_res) gave 8 possible combinations, while only a few were legal.
They are now interpreted as a three-bit type field, where the former
legal combinations turn into corresponding types and the unused
combinations were added as new types.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/mlx5_ifc.h | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 7c760e580268..608dc988b3d6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -4775,12 +4775,11 @@ struct mlx5_ifc_page_fault_resume_in_bits {

 	u8         error[0x1];
 	u8         reserved_at_41[0x4];
-	u8         rdma[0x1];
-	u8         read_write[0x1];
-	u8         req_res[0x1];
-	u8         qpn[0x18];
+	u8         page_fault_type[0x3];
+	u8         wq_number[0x18];

-	u8         reserved_at_60[0x20];
+	u8         reserved_at_60[0x8];
+	u8         token[0x18];
 };

 struct mlx5_ifc_nop_out_bits {
--
cgit v1.2.3
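A sketch of how a command buffer might be filled against this new
layout, using the standard MLX5_SET/mlx5_cmd_exec conventions. The
helper shown here is an illustration only; the following patch in this
series adds a mlx5_core_page_fault_resume() prototype of this shape.

/* Sketch only: resume a faulting WQ under the new layout. Field names
 * come from the diff above.
 */
static int page_fault_resume(struct mlx5_core_dev *dev, u32 token,
			     u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}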
From d9aaed838765e28234cb700c7d1ac975cadf28c9 Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:46 +0200
Subject: {net,IB}/mlx5: Refactor page fault handling

* Update the page fault event according to the latest specification.
* Separate the code paths for the page fault EQ, the completion EQ and
  the async EQ.
* Move the page fault handling work queue from an mlx5_ib static
  variable into the mlx5_core page fault EQ.
* Allocate the memory that stores ODP events dynamically as the events
  arrive; since this happens in atomic context, use a mempool.
* Make the mlx5_ib page fault handler run in process context.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/device.h |  6 ++-
 include/linux/mlx5/driver.h | 97 ++++++++++++++++++++++++++++++++++++++++++---
 include/linux/mlx5/qp.h     | 44 --------------------
 3 files changed, 96 insertions(+), 51 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9f489365b3d3..3ccaeff15a80 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -534,7 +534,9 @@ struct mlx5_eqe_page_fault {
 			__be16  wqe_index;
 			u16     reserved2;
 			__be16  packet_length;
-			u8      reserved3[12];
+			__be32  token;
+			u8      reserved4[8];
+			__be32  pftype_wq;
 		} __packed wqe;
 		struct {
 			__be32  r_key;
@@ -542,9 +544,9 @@ struct mlx5_eqe_page_fault {
 			__be16  packet_length;
 			__be32  rdma_op_len;
 			__be64  rdma_va;
+			__be32  pftype_token;
 		} __packed rdma;
 	} __packed;
-	__be32 flags_qpn;
 } __packed;

 struct mlx5_eqe_vport_change {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ec52f3b50bf5..b52d07491fe7 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -42,6 +42,7 @@
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
 #include <linux/workqueue.h>
+#include <linux/mempool.h>
 #include <linux/interrupt.h>

 #include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
 	MLX5_EQ_VEC_PAGES	 = 0,
 	MLX5_EQ_VEC_CMD		 = 1,
 	MLX5_EQ_VEC_ASYNC	 = 2,
+	MLX5_EQ_VEC_PFAULT	 = 3,
 	MLX5_EQ_VEC_COMP_BASE,
 };

@@ -178,6 +180,14 @@ enum mlx5_port_status {
 	MLX5_PORT_DOWN      = 2,
 };

+enum mlx5_eq_type {
+	MLX5_EQ_TYPE_COMP,
+	MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	MLX5_EQ_TYPE_PF,
+#endif
+};
+
 struct mlx5_uuar_info {
 	struct mlx5_uar	       *uars;
 	int			num_uars;
@@ -333,6 +343,14 @@ struct mlx5_eq_tasklet {
 	spinlock_t lock;
 };

+struct mlx5_eq_pagefault {
+	struct work_struct       work;
+	/* Pagefaults lock */
+	spinlock_t		 lock;
+	struct workqueue_struct *wq;
+	mempool_t		*pool;
+};
+
 struct mlx5_eq {
 	struct mlx5_core_dev   *dev;
 	__be32 __iomem	       *doorbell;
@@ -346,7 +364,13 @@ struct mlx5_eq {
 	struct list_head	list;
 	int			index;
 	struct mlx5_rsc_debug	*dbg;
-	struct mlx5_eq_tasklet	tasklet_ctx;
+	enum mlx5_eq_type	type;
+	union {
+		struct mlx5_eq_tasklet   tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		struct mlx5_eq_pagefault pf_ctx;
+#endif
+	};
 };

 struct mlx5_core_psv {
@@ -377,6 +401,8 @@ struct mlx5_core_mkey {
 	u32			pd;
 };

+#define MLX5_24BIT_MASK		((1 << 24) - 1)
+
 enum mlx5_res_type {
 	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
 	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -411,6 +437,9 @@ struct mlx5_eq_table {
 	struct mlx5_eq		pages_eq;
 	struct mlx5_eq		async_eq;
 	struct mlx5_eq		cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	struct mlx5_eq		pfault_eq;
+#endif
 	int			num_comp_vectors;
 	/* protect EQs list
 	 */
@@ -497,6 +526,7 @@ struct mlx5_fc_stats {

 struct mlx5_eswitch;
 struct mlx5_lag;
+struct mlx5_pagefault;

 struct mlx5_rl_entry {
 	u32			rate;
@@ -601,6 +631,14 @@ struct mlx5_priv {
 	struct mlx5_rl_table            rl_table;

 	struct mlx5_port_module_event_stats  pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	void		      (*pfault)(struct mlx5_core_dev *dev,
+					void *context,
+					struct mlx5_pagefault *pfault);
+	void		       *pfault_ctx;
+	struct srcu_struct      pfault_srcu;
+#endif
 };

 enum mlx5_device_state {
@@ -619,6 +657,50 @@ enum mlx5_pci_status {
 	MLX5_PCI_STATUS_ENABLED,
 };

+enum mlx5_pagefault_type_flags {
+	MLX5_PFAULT_REQUESTOR = 1 << 0,
+	MLX5_PFAULT_WRITE     = 1 << 1,
+	MLX5_PFAULT_RDMA      = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+	u32			bytes_committed;
+	u32			token;
+	u8			event_subtype;
+	u8			type;
+	union {
+		/* Initiator or send message responder pagefault details. */
+		struct {
+			/* Received packet size, only valid for responders. */
+			u32	packet_size;
+			/*
+			 * Number of resource holding WQE, depends on type.
+			 */
+			u32	wq_num;
+			/*
+			 * WQE index. Refers to either the send queue or
+			 * receive queue, according to event_subtype.
+			 */
+			u16	wqe_index;
+		} wqe;
+		/* RDMA responder pagefault details */
+		struct {
+			u32	r_key;
+			/*
+			 * Received packet size, minimal size page fault
+			 * resolution required for forward progress.
+			 */
+			u32	packet_size;
+			u32	rdma_op_len;
+			u64	rdma_va;
+		} rdma;
+	};

+	struct mlx5_eq	       *eq;
+	struct work_struct	work;
+};
+
 struct mlx5_td {
 	struct list_head tirs_list;
 	u32              tdn;
@@ -879,15 +961,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-#endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+		       int nent, u64 mask, const char *name,
+		       struct mlx5_uar *uar, enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -926,6 +1006,10 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
 			struct mlx5_odp_caps *odp_caps);
 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
 			     u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+				u32 wq_num, u8 type, int error);
+#endif

 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -974,6 +1058,9 @@ struct mlx5_interface {
 	void			(*detach)(struct mlx5_core_dev *dev, void *context);
 	void			(*event)(struct mlx5_core_dev *dev, void *context,
 					 enum mlx5_dev_event event, unsigned long param);
+	void			(*pfault)(struct mlx5_core_dev *dev,
+					  void *context,
+					  struct mlx5_pagefault *pfault);
 	void *                  (*get_dev)(void *context);
 	int			protocol;
 	struct list_head	list;
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 693811e0cb24..9ed775f5cb66 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,9 +50,6 @@
 #define MLX5_BSF_APPTAG_ESCAPE	0x1
 #define MLX5_BSF_APPREF_ESCAPE	0x2

-#define MLX5_QPN_BITS		24
-#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)
-
 enum mlx5_qp_optpar {
 	MLX5_QP_OPTPAR_ALT_ADDR_PATH	= 1 << 0,
 	MLX5_QP_OPTPAR_RRE		= 1 << 1,
@@ -418,46 +415,9 @@ struct mlx5_stride_block_ctrl_seg {
 	__be16		num_entries;
 };

-enum mlx5_pagefault_flags {
-	MLX5_PFAULT_REQUESTOR = 1 << 0,
-	MLX5_PFAULT_WRITE     = 1 << 1,
-	MLX5_PFAULT_RDMA      = 1 << 2,
-};
-
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-	u32			bytes_committed;
-	u8			event_subtype;
-	enum mlx5_pagefault_flags flags;
-	union {
-		/* Initiator or send message responder pagefault details. */
-		struct {
-			/* Received packet size, only valid for responders. */
-			u32	packet_size;
-			/*
-			 * WQE index. Refers to either the send queue or
-			 * receive queue, according to event_subtype.
-			 */
-			u16	wqe_index;
-		} wqe;
-		/* RDMA responder pagefault details */
-		struct {
-			u32	r_key;
-			/*
-			 * Received packet size, minimal size page fault
-			 * resolution required for forward progress.
-			 */
-			u32	packet_size;
-			u32	rdma_op_len;
-			u64	rdma_va;
-		} rdma;
-	};
-};
-
 struct mlx5_core_qp {
 	struct mlx5_core_rsc_common	common; /* must be first */
 	void (*event)		(struct mlx5_core_qp *, int);
-	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
 	int			qpn;
 	struct mlx5_rsc_debug	*dbg;
 	int			pid;
@@ -557,10 +517,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
-				u8 context, int error);
-#endif
 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
 				struct mlx5_core_qp *rq);
 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
--
cgit v1.2.3
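To show how the new hooks fit together, here is a hedged sketch of an
mlx5_interface consumer wiring up the new pfault callback; my_add,
my_remove and resolve_fault are hypothetical stand-ins, while
mlx5_core_page_fault_resume(), MLX5_24BIT_MASK and the pfault slot come
from the diff above.

/* Sketch only: an IB-side consumer registering a page fault handler.
 * resolve_fault(), my_add() and my_remove() are hypothetical.
 */
static void my_pfault_handler(struct mlx5_core_dev *mdev, void *context,
			      struct mlx5_pagefault *pfault)
{
	/* Runs in process context, off the page fault EQ's workqueue. */
	int status = resolve_fault(context, pfault);

	/* For a WQE-type fault: resume the faulting WQ; a nonzero
	 * status asks the HW to complete the offending WQE in error. */
	mlx5_core_page_fault_resume(mdev, pfault->token,
				    pfault->wqe.wq_num & MLX5_24BIT_MASK,
				    pfault->type, status);
}

static struct mlx5_interface my_intf = {
	.add	  = my_add,
	.remove	  = my_remove,
	.pfault	  = my_pfault_handler,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};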
From 17d2f88f92ce39b348f125f6b2e6eeb6b0906ac7 Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:47 +0200
Subject: IB/mlx5: Add ODP atomics support

Handle ODP atomic operations. When the initiator of an RDMA atomic
operation uses an ODP MR to provide the source data, handle the
resulting pagefault properly.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/mlx5_ifc.h |  2 +-
 include/linux/mlx5/qp.h       | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 608dc988b3d6..15f896781966 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
 	u8         receive[0x1];
 	u8         write[0x1];
 	u8         read[0x1];
-	u8         reserved_at_4[0x1];
+	u8         atomic[0x1];
 	u8         srq_receive[0x1];
 	u8         reserved_at_6[0x1a];
 };
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9ed775f5cb66..219c699c17b7 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -212,6 +212,7 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+#define MLX5_WQE_AV_EXT 0x80000000

 enum {
 	MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -242,6 +243,23 @@ struct mlx5_wqe_masked_atomic_seg {
 	__be64			compare_mask;
 };

+struct mlx5_base_av {
+	union {
+		struct {
+			__be32	qkey;
+			__be32	reserved;
+		} qkey;
+		__be64	dc_key;
+	} key;
+	__be32	dqp_dct;
+	u8	stat_rate_sl;
+	u8	fl_mlid;
+	union {
+		__be16	rlid;
+		__be16	udp_sport;
+	};
+};
+
 struct mlx5_av {
 	union {
 		struct {
--
cgit v1.2.3
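A short sketch of reading the new atomic bit with the generated IFC
accessors (the surrounding capability query is elided; the helper is
illustrative):

/* Sketch only: test the new per-transport ODP atomic capability bit. */
static bool odp_atomic_supported(void *per_transport_caps)
{
	return MLX5_GET(odp_per_transport_service_cap,
			per_transport_caps, atomic);
}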
From aa8e08d2f523501c40b0e70f1c4ecacb97195931 Mon Sep 17 00:00:00 2001
From: Artemy Kovalyov
Date: Mon, 2 Jan 2017 11:37:48 +0200
Subject: IB/mlx5: Improve MR check

Add a "type" field to the mlx5_core MKey struct. Check whether a page
fault happens on an MKey that corresponds to an MR.

Signed-off-by: Artemy Kovalyov
Signed-off-by: Leon Romanovsky
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/driver.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include')

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b52d07491fe7..cfa49bca009c 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -394,11 +394,17 @@ struct mlx5_core_sig_ctx {
 	u32			sigerr_count;
 };

+enum {
+	MLX5_MKEY_MR = 1,
+	MLX5_MKEY_MW,
+};
+
 struct mlx5_core_mkey {
 	u64			iova;
 	u64			size;
 	u32			key;
 	u32			pd;
+	u32			type;
 };

 #define MLX5_24BIT_MASK		((1 << 24) - 1)
--
cgit v1.2.3
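As an illustration of the check this enables, a hedged sketch of
rejecting non-MR MKeys in a page fault path; struct mlx5_ib_mr and its
mmkey member are assumed from the mlx5_ib driver, and the helper name
is hypothetical.

/* Sketch only: bail out unless the faulting MKey belongs to an MR. */
static struct mlx5_ib_mr *mr_from_mkey(struct mlx5_core_mkey *mmkey)
{
	if (!mmkey || mmkey->type != MLX5_MKEY_MR)
		return NULL;	/* e.g. a memory window: not handled here */

	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}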