path: root/drivers/net/ethernet
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-24 13:56:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-24 13:56:26 -0700
commit     5dedb9f3bd5bcb186313ea0c0cff8f2c525d4122 (patch)
tree       88514547a6e95176e7a9dc2fbdc7fa7c1bec3107 /drivers/net/ethernet
parent     ddb03448274b95bff6df2a2f1a74d7eb4be529d3 (diff)
parent     089117e1ad265625b523a4168f77f2521b18fd32 (diff)
Merge tag 'rdma-for-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes from Roland Dreier:
 - Updates to the qib low-level driver
 - First chunk of changes for SR-IOV support for mlx4 IB
 - RDMA CM support for IPv6-only binding
 - Other misc cleanups and fixes

Fix up some add-add conflicts in include/linux/mlx4/device.h and
drivers/net/ethernet/mellanox/mlx4/main.c

* tag 'rdma-for-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (30 commits)
  IB/qib: checkpatch fixes
  IB/qib: Add congestion control agent implementation
  IB/qib: Reduce sdma_lock contention
  IB/qib: Fix an incorrect log message
  IB/qib: Fix QP RCU sparse warnings
  mlx4: Put physical GID and P_Key table sizes in mlx4_phys_caps struct and paravirtualize them
  mlx4_core: Allow guests to have IB ports
  mlx4_core: Implement mechanism for reserved Q_Keys
  net/mlx4_core: Free ICM table in case of error
  IB/cm: Destroy idr as part of the module init error flow
  mlx4_core: Remove double function declarations
  IB/mlx4: Fill the masked_atomic_cap attribute in query device
  IB/mthca: Fill in sq_sig_type in query QP
  IB/mthca: Warning about event for non-existent QPs should show event type
  IB/qib: Fix sparse RCU warnings in qib_keys.c
  net/mlx4_core: Initialize IB port capabilities for all slaves
  mlx4: Use port management change event instead of smp_snoop
  IB/qib: RCU locking for MR validation
  IB/qib: Avoid returning EBUSY from MR deregister
  IB/qib: Fix UC MR refs for immediate operations
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c    5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c        22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c       132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c        2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.h        6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c       5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c      86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h      67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c      11
9 files changed, 206 insertions, 130 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 69ba57270481..a52922ed85c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
}
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
- enum mlx4_dev_event event, int port)
+ enum mlx4_dev_event event, unsigned long port)
{
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
@@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
return;
- mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
+ mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
+ (int) port);
}
}
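
[Editor's note] The hunk above widens the event callback's last argument from int to unsigned long so the same parameter can carry either a small port number or a pointer-sized payload; the eq.c hunk below passes the raw EQE through it for the new port-management-change event. A minimal user-space sketch of that overloading pattern follows; the demo_* names are illustrative stand-ins, only the idea of one unsigned long parameter serving both roles comes from the diff.

#include <stdio.h>

/* Illustrative stand-ins for the driver's event enum and EQE struct. */
enum demo_dev_event {
        DEMO_EVENT_PORT_UP,          /* param carries a port number */
        DEMO_EVENT_PORT_MGMT_CHANGE, /* param carries a pointer to the raw event */
};

struct demo_eqe {
        unsigned char type;
        unsigned char subtype;
};

static void demo_event(enum demo_dev_event event, unsigned long param)
{
        switch (event) {
        case DEMO_EVENT_PORT_UP:
                printf("port %d is up\n", (int) param);
                break;
        case DEMO_EVENT_PORT_MGMT_CHANGE: {
                /* a pointer-sized payload rides in the same argument */
                struct demo_eqe *eqe = (struct demo_eqe *) param;
                printf("management change, eqe subtype %d\n", (int) eqe->subtype);
                break;
        }
        }
}

int main(void)
{
        struct demo_eqe eqe = { .type = 1, .subtype = 4 };

        demo_event(DEMO_EVENT_PORT_UP, 2);
        demo_event(DEMO_EVENT_PORT_MGMT_CHANGE, (unsigned long) &eqe);
        return 0;
}
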
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd48337cbfc0..99a04648fab0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -83,6 +83,15 @@ enum {
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
+static u64 get_async_ev_mask(struct mlx4_dev *dev)
+{
+ u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+
+ return async_ev_mask;
+}
+
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
@@ -474,6 +483,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
+ case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
+ (unsigned long) eqe);
+ break;
+
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
@@ -957,7 +971,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.have_irq = 1;
}
- err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
@@ -997,7 +1011,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
mlx4_free_irqs(dev);
@@ -1041,7 +1055,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
mlx4_cmd_use_polling(dev);
/* Map the new eq to handle all asyncronous events */
- err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[i].eqn);
if (err) {
mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
@@ -1055,7 +1069,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
}
/* Return to default */
- mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
return err;
}
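
[Editor's note] get_async_ev_mask() above only widens the fixed MLX4_ASYNC_EVENT_MASK with one extra bit when the device advertises the port-management-change capability, so every MAP_EQ call now subscribes to that event conditionally. A self-contained sketch of the same bit arithmetic; the mask value, cap flag, and bit position below are made up for illustration and are not the real register layout.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CAP_PORT_MNG_CHG_EV  (1u << 0)                 /* illustrative cap flag */
#define DEMO_EVENT_PORT_MNG_CHG   59                        /* illustrative event bit */
#define DEMO_ASYNC_EVENT_MASK     0x00000000000fffffull     /* illustrative base mask */

static uint64_t demo_get_async_ev_mask(unsigned int caps)
{
        uint64_t mask = DEMO_ASYNC_EVENT_MASK;

        /* subscribe to the extra event only when the HCA supports it */
        if (caps & DEMO_CAP_PORT_MNG_CHG_EV)
                mask |= 1ull << DEMO_EVENT_PORT_MNG_CHG;
        return mask;
}

int main(void)
{
        printf("without cap: %#llx\n",
               (unsigned long long) demo_get_async_ev_mask(0));
        printf("with cap:    %#llx\n",
               (unsigned long long) demo_get_async_ev_mask(DEMO_CAP_PORT_MNG_CHG_EV));
        return 0;
}
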
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 1d70657058a5..c69648487321 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[48] = "Counters support",
+ [59] = "Port management change event support",
};
int i;
@@ -174,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
@@ -183,25 +185,44 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+#define QUERY_FUNC_CAP_FMR_FLAG 0x80
+#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
+#define QUERY_FUNC_CAP_FLAG_ETH 0x80
+
+/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+
+#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+
if (vhcr->op_modifier == 1) {
field = vhcr->in_modifier;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
- field = 0; /* ensure fvl bit is not set */
+ field = 0;
+ /* ensure force vlan and force mac bits are not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ /* ensure that phy_wqe_gid bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+
} else if (vhcr->op_modifier == 0) {
- field = 1 << 7; /* enable only ethernet interface */
+ /* enable rdma and ethernet interfaces */
+ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- size = 0; /* no PF behavious is set for now */
+ size = 0; /* no PF behaviour is set for now */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ field = 0; /* protected FMR support not available as yet */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
+
size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
@@ -254,11 +275,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
outbox = mailbox->buf;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
- if (!(field & (1 << 7))) {
- mlx4_err(dev, "The host doesn't support eth interface\n");
+ if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
+ mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
err = -EPROTONOSUPPORT;
goto out;
}
+ func_cap->flags = field;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@@ -297,17 +319,27 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
if (err)
goto out;
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & (1 << 7)) {
- mlx4_err(dev, "VLAN is enforced on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
- if (field & (1 << 6)) {
- mlx4_err(dev, "Force mac is enabled on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+ } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ mlx4_err(dev, "phy_wqe_gid is "
+ "enforced on this ib port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
}
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -707,14 +739,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
{
u64 def_mac;
u8 port_type;
+ u16 short_field;
int err;
-#define MLX4_PORT_SUPPORT_IB (1 << 0)
-#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
-#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
-#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
- ~MLX4_PORT_SUGGEST_TYPE & \
- ~MLX4_PORT_DEFAULT_SENSE)
+#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
+#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
+#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -730,20 +760,58 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(port_type, outbox->buf,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- /* Allow only Eth port, no link sensing allowed */
- port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
-
- /* check eth is enabled for this port */
- if (!(port_type & 2))
- mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+ /* No link sensing allowed */
+ port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
+ /* set port type to currently operating port type */
+ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ short_field = 1; /* slave max gids */
+ MLX4_PUT(outbox->buf, short_field,
+ QUERY_PORT_CUR_MAX_GID_OFFSET);
+
+ short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
+ MLX4_PUT(outbox->buf, short_field,
+ QUERY_PORT_CUR_MAX_PKEY_OFFSET);
}
return err;
}
+int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
+ int *gid_tbl_len, int *pkey_tbl_len)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u16 field;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
+ *gid_tbl_len = field;
+
+ MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
+ *pkey_tbl_len = field;
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -890,11 +958,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
if (mlx4_is_slave(dev))
goto out;
- MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
- dev->caps.function = lg;
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
@@ -975,9 +1044,12 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
if (err)
return err;
- /* for slaves, zero out everything except FW version */
+ /* for slaves, set pci PPF ID to invalid and zero out everything
+ * else except FW version */
outbuf[0] = outbuf[1] = 0;
memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+ outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
+
return 0;
}
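
[Editor's note] The QUERY_PORT wrapper above reports a per-slave view of the port: a single GID and the physical PKey table length, which the new mlx4_get_slave_pkey_gid_tbl_len() reads back on the guest side. A stand-alone model of that producer/consumer pair follows; the offsets mirror the 16-bit puts/gets in the hunk, but the byte-order conversion that MLX4_PUT/MLX4_GET perform is omitted and the buffer layout is otherwise illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CUR_MAX_PKEY_OFFSET 0x0c   /* mirrors QUERY_PORT_CUR_MAX_PKEY_OFFSET */
#define DEMO_CUR_MAX_GID_OFFSET  0x0e   /* mirrors QUERY_PORT_CUR_MAX_GID_OFFSET */

/* master side: fill the reply mailbox with the slave's view of the port */
static void demo_query_port_wrapper(uint8_t *outbox, int phys_pkey_len)
{
        uint16_t gids = 1;                      /* slaves see a single GID */
        uint16_t pkeys = (uint16_t) phys_pkey_len;

        memcpy(outbox + DEMO_CUR_MAX_GID_OFFSET, &gids, sizeof(gids));
        memcpy(outbox + DEMO_CUR_MAX_PKEY_OFFSET, &pkeys, sizeof(pkeys));
}

/* slave side: read the two table lengths back out of the mailbox */
static void demo_get_slave_tbl_len(const uint8_t *outbox,
                                   int *gid_tbl_len, int *pkey_tbl_len)
{
        uint16_t v;

        memcpy(&v, outbox + DEMO_CUR_MAX_GID_OFFSET, sizeof(v));
        *gid_tbl_len = v;
        memcpy(&v, outbox + DEMO_CUR_MAX_PKEY_OFFSET, sizeof(v));
        *pkey_tbl_len = v;
}

int main(void)
{
        uint8_t mailbox[256] = { 0 };
        int gids, pkeys;

        demo_query_port_wrapper(mailbox, 128);
        demo_get_slave_tbl_len(mailbox, &gids, &pkeys);
        assert(gids == 1 && pkeys == 128);
        printf("slave sees %d gid(s), %d pkey(s)\n", gids, pkeys);
        return 0;
}
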
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a9ade1c3cad5..88b7b3e75ab1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -413,6 +413,8 @@ err:
mlx4_free_icm(dev, table->icm[i], use_coherent);
}
+ kfree(table->icm);
+
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index b10c07a1dc1a..19e4efc0b342 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -81,13 +81,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
-int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- int start, int end);
-void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- int start, int end);
static inline void mlx4_icm_first(struct mlx4_icm *icm,
struct mlx4_icm_iter *iter)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index b4e9f6f5cc04..116895ac8b35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+ unsigned long param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_device_context *dev_ctx;
@@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, type, port);
+ dev_ctx->intf->event(dev, dev_ctx->context, type, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 42645166bae2..e8f8ebb4ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -218,6 +218,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
+ dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
+ dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
+ /* set gid and pkey table operating lengths by default
+ * to non-sriov values */
dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
@@ -312,29 +316,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* if only ETH is supported - assign ETH */
if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- /* if only IB is supported,
- * assign IB only if SRIOV is off*/
+ /* if only IB is supported, assign IB */
else if (dev->caps.supported_type[i] ==
- MLX4_PORT_TYPE_IB) {
- if (dev->flags & MLX4_FLAG_SRIOV)
- dev->caps.port_type[i] =
- MLX4_PORT_TYPE_NONE;
- else
- dev->caps.port_type[i] =
- MLX4_PORT_TYPE_IB;
- /* if IB and ETH are supported,
- * first of all check if SRIOV is on */
- } else if (dev->flags & MLX4_FLAG_SRIOV)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ MLX4_PORT_TYPE_IB)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
else {
- /* In non-SRIOV mode, we set the port type
- * according to user selection of port type,
- * if usere selected none, take the FW hint */
- if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ /* if IB and ETH are supported, we set the port
+ * type according to user selection of port type;
+ * if user selected none, take the FW hint */
+ if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
else
- dev->caps.port_type[i] = port_type_array[i-1];
+ dev->caps.port_type[i] = port_type_array[i - 1];
}
}
/*
@@ -415,6 +409,23 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
return ret;
}
+int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
+{
+ u32 qk = MLX4_RESERVED_QKEY_BASE;
+ if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
+ qpn < dev->caps.sqp_start)
+ return -EINVAL;
+
+ if (qpn >= dev->caps.base_tunnel_sqpn)
+ /* tunnel qp */
+ qk += qpn - dev->caps.base_tunnel_sqpn;
+ else
+ qk += qpn - dev->caps.sqp_start;
+ *qkey = qk;
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_get_parav_qkey);
+
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -515,8 +526,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
- for (i = 1; i <= dev->caps.num_ports; ++i)
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+ &dev->caps.gid_table_len[i],
+ &dev->caps.pkey_table_len[i]))
+ return -ENODEV;
+ }
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
@@ -553,7 +569,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port);
dev->caps.port_type[port] = port_types[port - 1];
- err = mlx4_SET_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port, -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, "
"aborting\n", port);
@@ -739,7 +755,7 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_unregister_device(mdev);
for (port = 1; port <= mdev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(mdev, port);
- err = mlx4_SET_PORT(mdev, port);
+ err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
mlx4_err(mdev, "Failed to set port %d, "
"aborting\n", port);
@@ -1192,6 +1208,17 @@ err:
return -EIO;
}
+static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
+{
+ int i;
+
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ dev->caps.gid_table_len[i] = 1;
+ dev->caps.pkey_table_len[i] =
+ dev->phys_caps.pkey_phys_table_len[i] - 1;
+ }
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1231,6 +1258,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ if (mlx4_is_master(dev))
+ mlx4_parav_master_pf_caps(dev);
+
priv->fs_hash_mode = MLX4_FS_L2_HASH;
switch (priv->fs_hash_mode) {
@@ -1522,12 +1552,24 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
"with caps = 0\n", port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+ /* initialize per-slave default ib port capabilities */
+ if (mlx4_is_master(dev)) {
+ int i;
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
+ ib_port_default_caps;
+ }
+ }
+
if (mlx4_is_mfunc(dev))
dev->caps.port_ib_mtu[port] = IB_MTU_2048;
else
dev->caps.port_ib_mtu[port] = IB_MTU_4096;
- err = mlx4_SET_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
+ dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
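
[Editor's note] mlx4_get_parav_qkey() in the hunk above hands each special QP a reserved QKey: tunnel QPs map to MLX4_RESERVED_QKEY_BASE plus their offset from base_tunnel_sqpn, proxy special QPs to the base plus their offset from sqp_start, and anything outside either range is rejected. A small stand-alone model of that range check and offset arithmetic follows; the base value, function count, and QP numbers below are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RESERVED_QKEY_BASE 0x80000000u  /* illustrative reserved-QKey base */
#define DEMO_MFUNC_MAX          8            /* illustrative number of functions */

struct demo_caps {
        uint32_t sqp_start;        /* first proxy special QP */
        uint32_t base_tunnel_sqpn; /* first tunnel QP */
};

/* mirrors the range check and offset arithmetic in mlx4_get_parav_qkey() */
static int demo_get_parav_qkey(const struct demo_caps *caps, uint32_t qpn,
                               uint32_t *qkey)
{
        uint32_t qk = DEMO_RESERVED_QKEY_BASE;

        if (qpn >= caps->base_tunnel_sqpn + 8 * DEMO_MFUNC_MAX ||
            qpn < caps->sqp_start)
                return -1;

        if (qpn >= caps->base_tunnel_sqpn)
                qk += qpn - caps->base_tunnel_sqpn;  /* tunnel QP */
        else
                qk += qpn - caps->sqp_start;         /* proxy special QP */
        *qkey = qk;
        return 0;
}

int main(void)
{
        struct demo_caps caps = { .sqp_start = 64, .base_tunnel_sqpn = 128 };
        uint32_t qkey;

        assert(demo_get_parav_qkey(&caps, 66, &qkey) == 0);   /* proxy QP */
        assert(qkey == DEMO_RESERVED_QKEY_BASE + 2);
        assert(demo_get_parav_qkey(&caps, 130, &qkey) == 0);  /* tunnel QP */
        assert(qkey == DEMO_RESERVED_QKEY_BASE + 2);
        assert(demo_get_parav_qkey(&caps, 10, &qkey) != 0);   /* out of range */
        printf("qkey mapping ok\n");
        return 0;
}
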
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d2c436b10fbf..59ebc0339638 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -351,66 +351,6 @@ struct mlx4_srq_context {
__be64 db_rec_addr;
};
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- struct {
- #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
- u32 reserved;
- u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
- } __packed comm_channel_arm;
- struct {
- u8 port;
- u8 reserved[3];
- __be64 mac;
- } __packed mac_update;
- struct {
- u8 port;
- } __packed sw_event;
- struct {
- __be32 slave_id;
- } __packed flr_event;
- struct {
- __be16 current_temperature;
- __be16 warning_threshold;
- } __packed warming;
- } event;
- u8 slave_id;
- u8 reserved3[2];
- u8 owner;
-} __packed;
-
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -902,7 +842,8 @@ void mlx4_catas_init(void);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+ unsigned long param);
struct mlx4_dev_cap;
struct mlx4_init_hca_param;
@@ -1043,7 +984,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type,
@@ -1086,6 +1027,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
+ int *gid_tbl_len, int *pkey_tbl_len);
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 028833ffc56f..e36dd0f2fa73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -775,14 +775,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
enum {
MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
+ MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
MLX4_CHANGE_PORT_VL_CAP = 21,
MLX4_CHANGE_PORT_MTU_CAP = 22,
};
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
struct mlx4_cmd_mailbox *mailbox;
- int err, vl_cap;
+ int err, vl_cap, pkey_tbl_flag = 0;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return 0;
@@ -795,11 +796,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+ if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
+ pkey_tbl_flag = 1;
+ ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
+ }
+
/* IB VL CAP enum isn't used by the firmware, just numerical values */
for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
((__be32 *) mailbox->buf)[0] = cpu_to_be32(
(1 << MLX4_CHANGE_PORT_MTU_CAP) |
(1 << MLX4_CHANGE_PORT_VL_CAP) |
+ (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
(vl_cap << MLX4_SET_PORT_VL_CAP));
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,