Lines Matching refs:mvq
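
For orientation before the hits below: taken together, the fields these lines touch suggest roughly the following shape for struct mlx5_vdpa_virtqueue. This is a hedged reconstruction from the listing alone, not the authoritative definition in mlx5_vnet.c; member types for the CQ, QPs, umems, and restore info are inferred from how they are passed around.

    struct mlx5_vdpa_virtqueue {
            bool ready;                      /* set_vq_ready() state */
            u64 desc_addr;                   /* descriptor area */
            u64 device_addr;                 /* used ring */
            u64 driver_addr;                 /* available ring */
            u32 num_ent;                     /* ring size, set_vq_num() */
            u16 avail_idx;                   /* hw indices, snapshotted */
            u16 used_idx;                    /*   on suspend */
            u16 index;                       /* queue index in the device */
            u32 virtq_id;                    /* firmware object id */
            struct mlx5_vdpa_net *ndev;      /* back-pointer */
            struct mlx5_vdpa_cq cq;
            struct mlx5_vdpa_qp fwqp;        /* firmware-side QP */
            struct mlx5_vdpa_qp vqqp;        /* software-side QP */
            struct mlx5_vdpa_umem umem1, umem2, umem3;
            u32 counter_set_id;              /* virtio_q_counters object */
            bool initialized;
            int fw_state;                    /* MLX5_VIRTIO_NET_Q_OBJECT_* */
            struct mlx5_vq_restore_info ri;  /* past the memset boundary
                                              * used by init_mvqs() */
    };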

397 struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)  in qp_prepare()  argument
403 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
423 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
442 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
453 vqp = &mvq->vqqp; in qp_create()
454 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
470 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
488 rx_post(vqp, mvq->num_ent); in qp_create()
534 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) in mlx5_vdpa_handle_completions() argument
536 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
539 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
540 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
546 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
553 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() local
554 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
558 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
560 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
567 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
573 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
575 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
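
The hits from lines 553 to 575 read as one drain-and-rearm loop. A hedged reconstruction of the control flow, pieced together from the lines above (the lookup of uar_page from the device resources is elided, and the real callback signature also carries an eqe argument):

    static void mlx5_vdpa_cq_comp_sketch(struct mlx5_core_cq *mcq,
                                         void __iomem *uar_page)
    {
            struct mlx5_vdpa_virtqueue *mvq =
                    container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
            int num = 0;

            while (!mlx5_vdpa_poll_one(&mvq->cq)) {
                    num++;
                    /* flush in batches of at most half the ring so
                     * rx_post() can refill before the RQ runs dry */
                    if (num > mvq->num_ent / 2) {
                            mlx5_vdpa_handle_completions(mvq, num);
                            num = 0;
                    }
            }

            if (num)
                    mlx5_vdpa_handle_completions(mvq, num);

            /* ask for an event on the next completion */
            mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page,
                        mvq->cq.mcq.cons_index);
    }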
580 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create() local
584 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
642 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
657 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy() local
659 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
669 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
680 *umemp = &mvq->umem1; in set_umem_size()
685 *umemp = &mvq->umem2; in set_umem_size()
690 *umemp = &mvq->umem3; in set_umem_size()
693 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
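
The assignment at line 693 pins down the umem sizing rule: it is linear in the ring size. A short note on what that implies (the coefficients themselves are not visible in this listing):

    /* size(umem_n) = a_n * num_ent + b_n,  n = 1, 2, 3
     *
     * a_n and b_n are presumably per-umem coefficients reported by the
     * device capabilities, so larger rings cost proportionally more
     * firmware scratch memory, plus a fixed per-umem overhead.
     */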
701 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
711 set_umem_size(ndev, mvq, num, &umem); in create_umem()
751 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
759 umem = &mvq->umem1; in umem_destroy()
762 umem = &mvq->umem2; in umem_destroy()
765 umem = &mvq->umem3; in umem_destroy()
777 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
783 err = create_umem(ndev, mvq, num); in umems_create()
791 umem_destroy(ndev, mvq, num); in umems_create()
796 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
801 umem_destroy(ndev, mvq, num); in umems_destroy()
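
Lines 777 through 801 imply the usual create-with-rollback idiom over the three umems. A minimal sketch, assuming the loop bounds match the umem1..umem3 members seen above:

    static int umems_create_sketch(struct mlx5_vdpa_net *ndev,
                                   struct mlx5_vdpa_virtqueue *mvq)
    {
            int num, err;

            for (num = 1; num <= 3; num++) {
                    err = create_umem(ndev, mvq, num);
                    if (err)
                            goto err_umem;
            }
            return 0;

    err_umem:
            /* unwind only the umems that were actually created */
            while (--num >= 1)
                    umem_destroy(ndev, mvq, num);
            return err;
    }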
838 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
848 err = umems_create(ndev, mvq); in create_virtqueue()
865 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
866 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
872 if (vq_is_tx(mvq->index)) in create_virtqueue()
876 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
877 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
878 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
881 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
882 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
883 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
885 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
886 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
887 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
888 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
889 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
890 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
893 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
899 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
901 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
908 umems_destroy(ndev, mvq); in create_virtqueue()
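
Worth pausing on lines 881 to 883: the vDPA core and the virtio spec name the three ring areas differently, and these MLX5_SET64 calls are where the translation happens (the set_vq_address() hits at lines 1966 to 1968 show the other end of the mapping):

    /* vdpa op argument    virtio split-ring area    virtio_q field   */
    /* desc_area       ->  descriptor table      ->  desc_addr        */
    /* driver_area     ->  available ring        ->  available_addr   */
    /* device_area     ->  used ring             ->  used_addr        */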
912 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
919 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
924 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
927 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
928 umems_destroy(ndev, mvq); in destroy_virtqueue()
931 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_rqpn() argument
933 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
936 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_qpn() argument
938 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
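
The mirrored ternaries at lines 933 and 938 encode a single invariant: each QP in the pair is the other's remote, so for either value of fw:

    /* get_rqpn(mvq, fw) == get_qpn(mvq, !fw) */

alloc_inout() at line 1044 relies on this to fill in the local and remote QPN of whichever side is being transitioned.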
1036 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1044 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1053 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1057 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1061 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1065 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1069 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1073 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1077 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1081 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
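
Flattening the ladder at lines 1057 to 1081 makes the choreography easier to see. Both QPs are walked through the standard RESET, INIT, RTR sequence in lock-step, firmware QP first, and only the firmware-side QP takes the extra step to RTS, presumably because it is the sending side of the pair:

    /*   step   qp   transition
     *   ----   --   ----------------
     *     1    fw   any   -> RESET
     *     2    sw   any   -> RESET
     *     3    fw   RESET -> INIT
     *     4    sw   RESET -> INIT
     *     5    fw   INIT  -> RTR
     *     6    sw   INIT  -> RTR
     *     7    fw   RTR   -> RTS
     */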
1090 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1108 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in query_virtqueue()
1141 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1150 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE) in modify_virtqueue()
1153 if (!is_valid_state_change(mvq->fw_state, state)) in modify_virtqueue()
1164 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in modify_virtqueue()
1174 mvq->fw_state = state; in modify_virtqueue()
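
The guards at lines 1150 and 1153 plus the store at line 1174 sketch the firmware object lifecycle. A hedged summary built from the states referenced in this listing (the full transition table lives in is_valid_state_change(), which takes no mvq argument and so does not appear here):

    /* NONE --create_virtqueue()--> INIT --modify--> RDY --modify--> SUSPEND
     *  any --destroy_virtqueue()--> NONE
     */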
1179 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1199 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1204 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1213 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1217 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1220 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1222 u16 idx = mvq->index; in setup_vq()
1225 if (!mvq->num_ent) in setup_vq()
1228 if (mvq->initialized) in setup_vq()
1231 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1235 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1239 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1243 err = connect_qps(ndev, mvq); in setup_vq()
1247 err = counter_set_alloc(ndev, mvq); in setup_vq()
1251 err = create_virtqueue(ndev, mvq); in setup_vq()
1255 if (mvq->ready) { in setup_vq()
1256 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1264 mvq->initialized = true; in setup_vq()
1268 counter_set_dealloc(ndev, mvq); in setup_vq()
1270 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1272 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
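
The setup_vq() hits from line 1220 down give the bring-up order, and the tail from line 1268 gives the unwind. Pairing them up (hedged; teardown_vq() at lines 1307 to 1318 runs the same list in reverse after suspending the queue first):

    /* cq_create          <->  cq_destroy
     * qp_create(fwqp)    <->  qp_destroy(fwqp)
     * qp_create(vqqp)    <->  qp_destroy(vqqp)
     * connect_qps             (undone implicitly when the QPs go)
     * counter_set_alloc  <->  counter_set_dealloc
     * create_virtqueue   <->  destroy_virtqueue
     * modify to RDY           (only if mvq->ready was already requested)
     */

Note that cq_destroy(ndev, idx) takes the queue index rather than mvq, which is why its error-path call in setup_vq() does not show up in an mvq-only reference listing.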
1278 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1282 if (!mvq->initialized) in suspend_vq()
1285 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vq()
1288 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1291 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1295 mvq->avail_idx = attr.available_index; in suspend_vq()
1296 mvq->used_idx = attr.used_index; in suspend_vq()
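
Order matters in suspend_vq(): the queue is moved to SUSPEND first so the hardware indices stop advancing, and only then are they snapshotted. A hedged restatement of the round trip:

    /* suspend:   modify(SUSPEND); query -> mvq->avail_idx, mvq->used_idx
     * re-create: hw_available_index / hw_used_index are seeded from the
     *            same two fields (lines 865-866), so the ring position
     *            survives a destroy/create cycle
     */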
1307 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1309 if (!mvq->initialized) in teardown_vq()
1312 suspend_vq(ndev, mvq); in teardown_vq()
1313 destroy_virtqueue(ndev, mvq); in teardown_vq()
1314 counter_set_dealloc(ndev, mvq); in teardown_vq()
1315 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1316 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1317 cq_destroy(ndev, mvq->index); in teardown_vq()
1318 mvq->initialized = false; in teardown_vq()
1928 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_kick_vq() local
1941 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1942 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
1953 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_address() local
1965 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1966 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1967 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
1968 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1976 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_num() local
1981 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1982 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2020 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_ready() local
2034 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2036 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2038 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2046 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2068 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_state() local
2078 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2079 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2084 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2085 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
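
A hedged reading of lines 2079 to 2085: loading state is refused while the firmware object is RDY, and a split ring that is assumed quiescent has caught up, avail equal to used, so the single saved avail_index seeds both counters:

    /* precondition: mvq->fw_state != RDY
     * mvq->used_idx  = state->split.avail_index;
     * mvq->avail_idx = state->split.avail_index;
     */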
2093 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vq_state() local
2105 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2110 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2115 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2119 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
2239 struct mlx5_vdpa_virtqueue *mvq; in teardown_virtqueues() local
2243 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2244 if (!mvq->initialized) in teardown_virtqueues()
2247 teardown_vq(ndev, mvq); in teardown_virtqueues()
2326 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2328 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2332 if (mvq->initialized) { in save_channel_info()
2333 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2340 ri->ready = mvq->ready; in save_channel_info()
2341 ri->num_ent = mvq->num_ent; in save_channel_info()
2342 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2343 ri->device_addr = mvq->device_addr; in save_channel_info()
2344 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2370 struct mlx5_vdpa_virtqueue *mvq; in restore_channels_info() local
2377 mvq = &ndev->vqs[i]; in restore_channels_info()
2378 ri = &mvq->ri; in restore_channels_info()
2382 mvq->avail_idx = ri->avail_index; in restore_channels_info()
2383 mvq->used_idx = ri->used_index; in restore_channels_info()
2384 mvq->ready = ri->ready; in restore_channels_info()
2385 mvq->num_ent = ri->num_ent; in restore_channels_info()
2386 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
2387 mvq->device_addr = ri->device_addr; in restore_channels_info()
2388 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
2737 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
2749 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
2757 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
2775 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vendor_vq_stats() local
2795 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
2796 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
2839 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_suspend() local
2847 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
2848 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
2955 struct mlx5_vdpa_virtqueue *mvq; in init_mvqs() local
2959 mvq = &ndev->vqs[i]; in init_mvqs()
2960 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
2961 mvq->index = i; in init_mvqs()
2962 mvq->ndev = ndev; in init_mvqs()
2963 mvq->fwqp.fw = true; in init_mvqs()
2964 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in init_mvqs()
2967 mvq = &ndev->vqs[i]; in init_mvqs()
2968 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
2969 mvq->index = i; in init_mvqs()
2970 mvq->ndev = ndev; in init_mvqs()
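
The memset bound at lines 2960 and 2968 is deliberate:

    memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));

clears everything up to, but not including, the restore info, so a snapshot taken by save_channel_info() (line 2326) survives re-initialization and can be replayed by restore_channels_info() (line 2370). The second loop presumably covers queues beyond the data-path pairs; those get only an index and a back-pointer, with no firmware QP marking and no initial fw_state.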