author    Or Gerlitz <ogerlitz@mellanox.com>  2015-06-25 17:45:38 +0300
committer Doug Ledford <dledford@redhat.com>  2015-07-14 13:20:14 -0400
commit    43bfb9729ea88d46e3f4d3ad7b17106c7b071fcb
tree      7723ad040d935dc50a06a4beb394d8f6d634478d /drivers
parent    cb1ff431c3dd904cda37d6d07d4a3ea29840d621
IB/mlx4: Fix use of flow-counters for process_mad
For IB links, HCA flow counters should be read through iboe_process_mad() only when mlx4_ib_process_mad() is invoked for VF PMA queries, and for nothing else.

Fixes: 7193a141eb74 ('IB/mlx4: Set VF to read from QP counters')
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 29
1 file changed, 19 insertions(+), 10 deletions(-)
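For reference, the gate the patch introduces (visible in the diff below) sends a MAD down the flow-counter path only when the device is a VF and the MAD is a PMA PortCounters query. The following is a minimal user-space sketch of that predicate, not kernel code: struct mad_hdr and use_flow_counters() are hypothetical stand-ins, and while the constant values match include/rdma/ib_mad.h and include/rdma/ib_pma.h, the kernel stores IB_PMA_PORT_COUNTERS big-endian via cpu_to_be16() (host order is used here for simplicity).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the fields of the kernel's struct ib_mad_hdr used here. */
struct mad_hdr {
        uint8_t  mgmt_class;
        uint16_t attr_id;       /* host order here; big-endian in the kernel */
};

#define IB_MGMT_CLASS_PERF_MGMT 0x04    /* performance management (PMA) class */
#define IB_PMA_PORT_COUNTERS    0x0012  /* PortCounters attribute */

/* Hypothetical helper mirroring the patch's condition: take the HCA
 * flow-counter path only for a VF issuing a PMA PortCounters query. */
static bool use_flow_counters(bool is_vf, const struct mad_hdr *hdr)
{
        return is_vf &&
               hdr->mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
               hdr->attr_id == IB_PMA_PORT_COUNTERS;
}

int main(void)
{
        struct mad_hdr pma   = { IB_MGMT_CLASS_PERF_MGMT, IB_PMA_PORT_COUNTERS };
        struct mad_hdr other = { 0x01 /* subnet mgmt */, 0x0011 };

        printf("VF + PMA PortCounters -> %d (iboe_process_mad)\n", use_flow_counters(true, &pma));
        printf("VF + other MAD        -> %d (ib_process_mad)\n", use_flow_counters(true, &other));
        printf("PF + PMA PortCounters -> %d (ib_process_mad)\n", use_flow_counters(false, &pma));
        return 0;
}

Only the first combination returns 1; every other MAD on an IB link keeps going through ib_process_mad(), which is the behavior the patch restores for non-PMA VF traffic.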
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9a810e302f6b..68b3dfa922bf 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -860,22 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
+       enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

-       switch (rdma_port_get_link_layer(ibdev, port_num)) {
-       case IB_LINK_LAYER_INFINIBAND:
-               if (!mlx4_is_slave(dev->dev))
-                       return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                             in_grh, in_mad, out_mad);
-       case IB_LINK_LAYER_ETHERNET:
-               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                       in_grh, in_mad, out_mad);
-       default:
-               return -EINVAL;
+       /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
+        * queries, should be called only by VFs and for that specific purpose
+        */
+       if (link == IB_LINK_LAYER_INFINIBAND) {
+               if (mlx4_is_slave(dev->dev) &&
+                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+                   in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+                       return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                               in_grh, in_mad, out_mad);
+
+               return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                     in_grh, in_mad, out_mad);
        }
+
+       if (link == IB_LINK_LAYER_ETHERNET)
+               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                       in_grh, in_mad, out_mad);
+
+       return -EINVAL;
 }

 static void send_handler(struct ib_mad_agent *agent,
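For contrast, the removed switch relied on an implicit fall-through: on a VF, the IB_LINK_LAYER_INFINIBAND case skipped the early return (mlx4_is_slave() is true) and fell into the IB_LINK_LAYER_ETHERNET case, so every VF MAD, not only PMA queries, was handled by iboe_process_mad(). A compressed sketch of that pre-patch dispatch, with hypothetical stand-in names (link_layer, old_dispatch, is_vf):

#include <stdbool.h>

enum link_layer { LINK_IB, LINK_ETH };

/* Mirrors the shape of the removed switch; returns the handler's name. */
static const char *old_dispatch(enum link_layer link, bool is_vf)
{
        switch (link) {
        case LINK_IB:
                if (!is_vf)
                        return "ib_process_mad";
                /* no break: VF MADs on IB links fall through... */
        case LINK_ETH:
                return "iboe_process_mad";      /* ...and all land here */
        default:
                return "-EINVAL";
        }
}

Here old_dispatch(LINK_IB, true) yields "iboe_process_mad" for any VF MAD, which is the over-broad behavior the patch narrows to PMA PortCounters queries only.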