author		Eli Cohen <elic@nvidia.com>	2022-05-18 16:38:03 +0300
committer	Michael S. Tsirkin <mst@redhat.com>	2022-05-31 12:44:22 -0400
commit		1892a3d425bf525ac98d6d3534035e6ed2bfab50
tree		5efbb4b7d1a2dfd2d0e87292b21d149ebc09dfaf /drivers/vdpa
parent		a6a51adc6e8aafebfe0c4beb80e99694ea562b40
vdpa/mlx5: Add support for reading descriptor statistics
Implement the get_vendor_vq_stats callback of vdpa_config_ops to return
the statistics for a virtqueue.

The statistics are exposed as vendor-specific statistics: for each
counter, the driver provides a pair of attributes, the attribute name
and the attribute value.

The counters currently supported are received descriptors and completed
descriptors.
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-6-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
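
For reference, the vendor-stats contract used here is a flat list of
alternating netlink attributes: for each counter the driver first puts a
string attribute carrying the counter's name, then a u64 attribute carrying
its value. A minimal sketch of such a callback follows; the function name
and the "my_counter" statistic are illustrative placeholders, not part of
this patch:

static int example_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
					struct sk_buff *msg,
					struct netlink_ext_ack *extack)
{
	u64 my_counter = 0;	/* a real driver reads this from the device */

	/* each statistic is a name attribute... */
	if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "my_counter"))
		return -EMSGSIZE;

	/* ...immediately followed by its 64-bit value */
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, my_counter,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}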
Diffstat (limited to 'drivers/vdpa')
-rw-r--r--	drivers/vdpa/mlx5/core/mlx5_vdpa.h |   2
-rw-r--r--	drivers/vdpa/mlx5/net/mlx5_vnet.c  | 149
2 files changed, 151 insertions, 0 deletions
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index daaf7b503677..44104093163b 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -61,6 +61,8 @@ struct mlx5_control_vq {
 	struct vringh_kiov riov;
 	struct vringh_kiov wiov;
 	unsigned short head;
+	unsigned int received_desc;
+	unsigned int completed_desc;
 };
 
 struct mlx5_vdpa_wq_ent {
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e0de44000d92..2b815ef850c8 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -119,6 +119,7 @@ struct mlx5_vdpa_virtqueue {
 	struct mlx5_vdpa_umem umem2;
 	struct mlx5_vdpa_umem umem3;
 
+	u32 counter_set_id;
 	bool initialized;
 	int index;
 	u32 virtq_id;
@@ -818,6 +819,12 @@ static u16 get_features_12_3(u64 features)
 	       (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6);
 }
 
+static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
+{
+	return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
+	       BIT_ULL(MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+}
+
 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
@@ -872,6 +879,8 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
 	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
 	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+	if (counters_supported(&ndev->mvdev))
+		MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
 
 	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
 	if (err)
@@ -1135,6 +1144,47 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	return err;
 }
 
+static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+	u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(create_virtio_q_counters_out)] = {};
+	void *cmd_hdr;
+	int err;
+
+	if (!counters_supported(&ndev->mvdev))
+		return 0;
+
+	cmd_hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+	return 0;
+}
+
+static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_virtio_q_counters_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(destroy_virtio_q_counters_out)] = {};
+
+	if (!counters_supported(&ndev->mvdev))
+		return;
+
+	MLX5_SET(destroy_virtio_q_counters_in, in, hdr.opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+	MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
+	MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
+	MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+		mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
+}
+
 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
 	u16 idx = mvq->index;
@@ -1162,6 +1212,10 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 	if (err)
 		goto err_connect;
 
+	err = counter_set_alloc(ndev, mvq);
+	if (err)
+		goto err_counter;
+
 	err = create_virtqueue(ndev, mvq);
 	if (err)
 		goto err_connect;
@@ -1179,6 +1233,8 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 	return 0;
 
 err_connect:
+	counter_set_dealloc(ndev, mvq);
+err_counter:
 	qp_destroy(ndev, &mvq->vqqp);
 err_vqqp:
 	qp_destroy(ndev, &mvq->fwqp);
@@ -1223,6 +1279,7 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
 
 	suspend_vq(ndev, mvq);
 	destroy_virtqueue(ndev, mvq);
+	counter_set_dealloc(ndev, mvq);
 	qp_destroy(ndev, &mvq->vqqp);
 	qp_destroy(ndev, &mvq->fwqp);
 	cq_destroy(ndev, mvq->index);
@@ -1659,6 +1716,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 		if (read != sizeof(ctrl))
 			break;
 
+		cvq->received_desc++;
 		switch (ctrl.class) {
 		case VIRTIO_NET_CTRL_MAC:
 			status = handle_ctrl_mac(mvdev, ctrl.cmd);
@@ -1682,6 +1740,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 		if (vringh_need_notify_iotlb(&cvq->vring))
 			vringh_notify(&cvq->vring);
 
+		cvq->completed_desc++;
 		queue_work(mvdev->wq, &wqent->work);
 		break;
 	}
@@ -2303,6 +2362,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
 	ndev->mvdev.status = 0;
 	ndev->cur_num_vqs = 0;
+	ndev->mvdev.cvq.received_desc = 0;
+	ndev->mvdev.cvq.completed_desc = 0;
 	memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
 	ndev->mvdev.actual_features = 0;
 	++mvdev->generation;
@@ -2422,6 +2483,93 @@ static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
 	return mvdev->actual_features;
 }
 
+static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+			     u64 *received_desc, u64 *completed_desc)
+{
+	u32 in[MLX5_ST_SZ_DW(query_virtio_q_counters_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {};
+	void *cmd_hdr;
+	void *ctx;
+	int err;
+
+	if (!counters_supported(&ndev->mvdev))
+		return -EOPNOTSUPP;
+
+	if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+		return -EAGAIN;
+
+	cmd_hdr = MLX5_ADDR_OF(query_virtio_q_counters_in, in, hdr);
+
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+	MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
+
+	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters);
+	*received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc);
+	*completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc);
+	return 0;
+}
+
+static int mlx5_vdpa_get_vendor_vq_stats(struct vdpa_device *vdev, u16 idx,
+					 struct sk_buff *msg,
+					 struct netlink_ext_ack *extack)
+{
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+	struct mlx5_vdpa_virtqueue *mvq;
+	struct mlx5_control_vq *cvq;
+	u64 received_desc;
+	u64 completed_desc;
+	int err = 0;
+
+	mutex_lock(&ndev->reslock);
+	if (!is_index_valid(mvdev, idx)) {
+		NL_SET_ERR_MSG_MOD(extack, "virtqueue index is not valid");
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	if (idx == ctrl_vq_idx(mvdev)) {
+		cvq = &mvdev->cvq;
+		received_desc = cvq->received_desc;
+		completed_desc = cvq->completed_desc;
+		goto out;
+	}
+
+	mvq = &ndev->vqs[idx];
+	err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "failed to query hardware");
+		goto out_err;
+	}
+
+out:
+	err = -EMSGSIZE;
+	if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "received_desc"))
+		goto out_err;
+
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, received_desc,
+			      VDPA_ATTR_PAD))
+		goto out_err;
+
+	if (nla_put_string(msg, VDPA_ATTR_DEV_VENDOR_ATTR_NAME, "completed_desc"))
+		goto out_err;
+
+	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, completed_desc,
+			      VDPA_ATTR_PAD))
+		goto out_err;
+
+	err = 0;
+out_err:
+	mutex_unlock(&ndev->reslock);
+	return err;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.set_vq_address = mlx5_vdpa_set_vq_address,
 	.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2431,6 +2579,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.get_vq_ready = mlx5_vdpa_get_vq_ready,
 	.set_vq_state = mlx5_vdpa_set_vq_state,
 	.get_vq_state = mlx5_vdpa_get_vq_state,
+	.get_vendor_vq_stats = mlx5_vdpa_get_vendor_vq_stats,
 	.get_vq_notification = mlx5_get_vq_notification,
 	.get_vq_irq = mlx5_get_vq_irq,
 	.get_vq_align = mlx5_vdpa_get_vq_align,
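
With the callback wired into mlx5_vdpa_ops, the counters become visible to
userspace through the vdpa netlink statistics query. Assuming an iproute2
build that supports vendor statistics (the device name below is illustrative
and the subcommand spelling may differ between versions), a per-virtqueue
query looks roughly like:

    $ vdpa dev vstats show vdpa0 qidx 1

which prints the received_desc/completed_desc name/value pairs emitted by
mlx5_vdpa_get_vendor_vq_stats; querying the control VQ index instead returns
the software counters maintained in mlx5_cvq_kick_handler.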