Lines Matching refs:mvq

327 		       struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)  in qp_prepare()  argument
333 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
353 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
372 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
383 vqp = &mvq->vqqp; in qp_create()
384 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
400 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
418 rx_post(vqp, mvq->num_ent); in qp_create()
464 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) in mlx5_vdpa_handle_completions() argument
466 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
467 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
468 if (mvq->event_cb.callback) in mlx5_vdpa_handle_completions()
469 mvq->event_cb.callback(mvq->event_cb.private); in mlx5_vdpa_handle_completions()
474 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() local
475 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
479 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
481 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
488 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
494 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
496 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
501 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create() local
505 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
564 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
579 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy() local
581 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
591 static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in umem_size() argument
602 *umemp = &mvq->umem1; in umem_size()
607 *umemp = &mvq->umem2; in umem_size()
612 *umemp = &mvq->umem3; in umem_size()
615 return p_a * mvq->num_ent + p_b; in umem_size()
623 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
634 size = umem_size(ndev, mvq, num, &umem); in create_umem()
678 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
686 umem = &mvq->umem1; in umem_destroy()
689 umem = &mvq->umem2; in umem_destroy()
692 umem = &mvq->umem3; in umem_destroy()
704 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
710 err = create_umem(ndev, mvq, num); in umems_create()
718 umem_destroy(ndev, mvq, num); in umems_create()
723 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
728 umem_destroy(ndev, mvq, num); in umems_destroy()
759 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
769 err = umems_create(ndev, mvq); in create_virtqueue()
786 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
792 if (vq_is_tx(mvq->index)) in create_virtqueue()
796 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
797 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
798 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
801 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
802 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
803 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
805 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
806 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
807 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
808 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
809 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
810 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
820 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
827 umems_destroy(ndev, mvq); in create_virtqueue()
831 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
838 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
843 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
846 umems_destroy(ndev, mvq); in destroy_virtqueue()
849 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_rqpn() argument
851 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
854 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_qpn() argument
856 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
954 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
962 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
971 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
975 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
979 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
983 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
987 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
991 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
995 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
999 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
1007 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1025 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in query_virtqueue()
1043 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1060 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in modify_virtqueue()
1070 mvq->fw_state = state; in modify_virtqueue()
1075 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1077 u16 idx = mvq->index; in setup_vq()
1080 if (!mvq->num_ent) in setup_vq()
1083 if (mvq->initialized) { in setup_vq()
1088 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1092 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1096 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1100 err = connect_qps(ndev, mvq); in setup_vq()
1104 err = create_virtqueue(ndev, mvq); in setup_vq()
1108 if (mvq->ready) { in setup_vq()
1109 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1117 mvq->initialized = true; in setup_vq()
1121 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1123 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1129 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1133 if (!mvq->initialized) in suspend_vq()
1136 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vq()
1139 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1142 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1146 mvq->avail_idx = attr.available_index; in suspend_vq()
1157 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1159 if (!mvq->initialized) in teardown_vq()
1162 suspend_vq(ndev, mvq); in teardown_vq()
1163 destroy_virtqueue(ndev, mvq); in teardown_vq()
1164 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1165 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1166 cq_destroy(ndev, mvq->index); in teardown_vq()
1167 mvq->initialized = false; in teardown_vq()
1331 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq() local
1333 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
1344 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address() local
1346 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1347 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
1348 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1356 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_num() local
1358 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1359 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
1375 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready() local
1378 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
1380 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
1387 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_ready() local
1389 return mvq->ready; in mlx5_vdpa_get_vq_ready()
1397 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state() local
1399 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
1404 mvq->avail_idx = state->avail_index; in mlx5_vdpa_set_vq_state()
1412 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state() local
1420 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
1421 state->avail_index = mvq->avail_idx; in mlx5_vdpa_get_vq_state()
1425 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
1506 struct mlx5_vdpa_virtqueue *mvq; in teardown_virtqueues() local
1510 mvq = &ndev->vqs[i]; in teardown_virtqueues()
1511 if (!mvq->initialized) in teardown_virtqueues()
1514 teardown_vq(ndev, mvq); in teardown_virtqueues()
1579 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
1581 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
1585 if (!mvq->initialized) in save_channel_info()
1588 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
1593 ri->ready = mvq->ready; in save_channel_info()
1594 ri->num_ent = mvq->num_ent; in save_channel_info()
1595 ri->desc_addr = mvq->desc_addr; in save_channel_info()
1596 ri->device_addr = mvq->device_addr; in save_channel_info()
1597 ri->driver_addr = mvq->driver_addr; in save_channel_info()
1598 ri->cb = mvq->event_cb; in save_channel_info()
1624 struct mlx5_vdpa_virtqueue *mvq; in restore_channels_info() local
1631 mvq = &ndev->vqs[i]; in restore_channels_info()
1632 ri = &mvq->ri; in restore_channels_info()
1636 mvq->avail_idx = ri->avail_index; in restore_channels_info()
1637 mvq->ready = ri->ready; in restore_channels_info()
1638 mvq->num_ent = ri->num_ent; in restore_channels_info()
1639 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
1640 mvq->device_addr = ri->device_addr; in restore_channels_info()
1641 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
1642 mvq->event_cb = ri->cb; in restore_channels_info()
1913 struct mlx5_vdpa_virtqueue *mvq; in init_mvqs() local
1917 mvq = &ndev->vqs[i]; in init_mvqs()
1918 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
1919 mvq->index = i; in init_mvqs()
1920 mvq->ndev = ndev; in init_mvqs()
1921 mvq->fwqp.fw = true; in init_mvqs()
1924 mvq = &ndev->vqs[i]; in init_mvqs()
1925 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
1926 mvq->index = i; in init_mvqs()
1927 mvq->ndev = ndev; in init_mvqs()