Lines Matching refs:mvq
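
These references are from the mlx5 vDPA network driver (drivers/vdpa/mlx5/net/mlx5_vnet.c in the Linux kernel). mvq is the per-virtqueue state, struct mlx5_vdpa_virtqueue: it holds the completion queue (cq), the firmware/driver QP pair (fwqp/vqqp), the umems backing the firmware object, the cached ring addresses and depth, the saved ring indexes (avail_idx/used_idx), the firmware object state (fw_state), the per-queue counter set and MSI-X map, and the restore snapshot (ri).
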
356 struct mlx5_vdpa_virtqueue *mvq, u32 num_ent) in qp_prepare() argument
362 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
382 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
401 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
412 vqp = &mvq->vqqp; in qp_create()
413 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
429 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
447 rx_post(vqp, mvq->num_ent); in qp_create()
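
qp_prepare() and qp_create() build the two QPs behind each virtqueue: the fw flag selects the firmware-owned QP (fwqp) or the driver-side one (vqqp), and both are pointed at the virtqueue's CQ via cqn_rcv. Only the driver-side QP gets a receive buffer; it is sized to num_ent and fully posted up front with rx_post().
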
493 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) in mlx5_vdpa_handle_completions() argument
495 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
498 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
499 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
505 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
512 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() local
513 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
517 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
519 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
526 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
532 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
534 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
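
mlx5_vdpa_cq_comp() drains the CQ in a loop and flushes the accumulated completions once more than half the queue depth has been consumed, so used-ring updates and receive-buffer re-posting (rx_post() in mlx5_vdpa_handle_completions()) keep pace on a busy queue; whatever remains is flushed after the loop and the CQ is re-armed. A minimal standalone model of that pacing, with made-up names and numbers:

#include <stdio.h>

#define NUM_ENT 256            /* made-up queue depth */

static int pending = 700;      /* pretend this many completions are queued */

static int poll_one(void)
{
	return pending-- > 0 ? 0 : -1;   /* 0 means one completion consumed */
}

static void handle_completions(int num)
{
	printf("flush batch of %d, signal guest, re-post rx buffers\n", num);
}

int main(void)
{
	int num = 0;

	while (!poll_one()) {
		num++;
		if (num > NUM_ENT / 2) {   /* same half-depth threshold as the driver */
			handle_completions(num);
			num = 0;
		}
	}
	if (num)
		handle_completions(num);
	/* the driver then re-arms the CQ with mlx5_cq_arm() */
	return 0;
}
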
539 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create() local
543 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
601 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
616 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy() local
618 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
668 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
678 *umemp = &mvq->umem1; in set_umem_size()
683 *umemp = &mvq->umem2; in set_umem_size()
688 *umemp = &mvq->umem3; in set_umem_size()
692 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
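
Each virtqueue object needs up to three firmware umems, and set_umem_size() sizes each one linearly in the queue depth: size = p_a * num_ent + p_b, with p_a/p_b queried from device capabilities per umem. umems_create() below then calls create_umem() for num = 1..3 and unwinds the already-created umems if a later one fails. A sketch of the sizing, with invented parameter values:

#include <stdint.h>
#include <stdio.h>

struct umem_params {
	uint32_t p_a;   /* per-entry cost, from device caps */
	uint32_t p_b;   /* fixed overhead, from device caps */
};

static uint64_t umem_size(struct umem_params p, uint16_t num_ent)
{
	return (uint64_t)p.p_a * num_ent + p.p_b;
}

int main(void)
{
	struct umem_params umem1 = { .p_a = 128, .p_b = 4096 };  /* invented values */

	printf("umem1 for a 256-entry queue: %llu bytes\n",
	       (unsigned long long)umem_size(umem1, 256));
	return 0;
}
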
700 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
710 set_umem_size(ndev, mvq, num, &umem); in create_umem()
750 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
758 umem = &mvq->umem1; in umem_destroy()
761 umem = &mvq->umem2; in umem_destroy()
764 umem = &mvq->umem3; in umem_destroy()
776 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
782 err = create_umem(ndev, mvq, num); in umems_create()
790 umem_destroy(ndev, mvq, num); in umems_create()
795 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
800 umem_destroy(ndev, mvq, num); in umems_destroy()
860 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
871 err = umems_create(ndev, mvq); in create_virtqueue()
889 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
890 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
898 if (vq_is_tx(mvq->index)) in create_virtqueue()
901 if (mvq->map.virq) { in create_virtqueue()
903 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index); in create_virtqueue()
906 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
909 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
910 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
913 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
914 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
915 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
917 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
918 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
919 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
920 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
921 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
922 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
925 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
931 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
933 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
940 umems_destroy(ndev, mvq); in create_virtqueue()
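
Note the cross-naming when the virtqueue object is created: vDPA's driver area becomes the virtio available ring (available_addr) and its device area the used ring (used_addr), with desc_addr the descriptor table. hw_available_index/hw_used_index seed the firmware with the indexes saved at the last suspend, and the event source is either the mapped MSI-X vector (mvq->map.index) or, failing that, the firmware QP number.
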
944 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
951 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
956 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
959 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
960 umems_destroy(ndev, mvq); in destroy_virtqueue()
963 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_rqpn() argument
965 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
968 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_qpn() argument
970 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
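
get_qpn() and get_rqpn() are mirror images: for a given fw, one returns the QP being modified and the other returns its peer, so each QP in the pair is wired to the other as its remote when the pair is connected below.
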
1068 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1076 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1085 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1089 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1093 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1097 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1101 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1105 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1109 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1113 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
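
connect_qps() walks both QPs through the standard mlx5 state ladder, resetting first and then moving the fw side and the vq side in lockstep through INIT and RTR; only the firmware QP takes the final step to RTS, leaving the vq QP receive-only in RTR. A standalone model of the ordering (all names are illustrative):

#include <stdio.h>

enum qp_cmd { CMD_2RST, CMD_RST2INIT, CMD_INIT2RTR, CMD_RTR2RTS };

static const char *cmd_name[] = { "2RST", "RST2INIT", "INIT2RTR", "RTR2RTS" };

static void modify_qp(const char *qp, enum qp_cmd cmd)
{
	printf("%s: %s\n", qp, cmd_name[cmd]);
}

int main(void)
{
	/* fw side first at every stage, mirroring connect_qps() */
	modify_qp("fwqp", CMD_2RST);     modify_qp("vqqp", CMD_2RST);
	modify_qp("fwqp", CMD_RST2INIT); modify_qp("vqqp", CMD_RST2INIT);
	modify_qp("fwqp", CMD_INIT2RTR); modify_qp("vqqp", CMD_INIT2RTR);
	modify_qp("fwqp", CMD_RTR2RTS);  /* vqqp deliberately stays in RTR */
	return 0;
}
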
1122 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1140 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in query_virtqueue()
1173 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1182 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE) in modify_virtqueue()
1185 if (!is_valid_state_change(mvq->fw_state, state)) in modify_virtqueue()
1196 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in modify_virtqueue()
1206 mvq->fw_state = state; in modify_virtqueue()
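
modify_virtqueue() refuses to touch an object that was never created (fw_state == NONE) and gates everything else on is_valid_state_change(). In this vintage of the driver the legal driver-initiated transitions appear to be INIT -> RDY and RDY -> SUSPEND only; resuming a queue means destroying and recreating the object. A hedged standalone model under that assumption:

#include <assert.h>
#include <stdbool.h>

enum vq_obj_state { Q_NONE, Q_INIT, Q_RDY, Q_SUSPEND };

static bool valid_state_change(enum vq_obj_state from, enum vq_obj_state to)
{
	switch (from) {
	case Q_INIT:
		return to == Q_RDY;      /* a created object may be made ready */
	case Q_RDY:
		return to == Q_SUSPEND;  /* a ready object may be suspended */
	default:
		return false;            /* NONE and SUSPEND accept no changes */
	}
}

int main(void)
{
	assert(valid_state_change(Q_INIT, Q_RDY));
	assert(valid_state_change(Q_RDY, Q_SUSPEND));
	assert(!valid_state_change(Q_SUSPEND, Q_RDY));
	assert(!valid_state_change(Q_NONE, Q_RDY));
	return 0;
}
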
1211 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1231 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1236 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1245 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1249 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1263 struct mlx5_vdpa_virtqueue *mvq) in alloc_vector() argument
1274 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1275 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1282 mvq->map = ent->map; in alloc_vector()
1289 struct mlx5_vdpa_virtqueue *mvq) in dealloc_vector() argument
1295 if (mvq->map.virq == irqp->entries[i].map.virq) { in dealloc_vector()
1296 free_irq(mvq->map.virq, irqp->entries[i].dev_id); in dealloc_vector()
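
alloc_vector() takes an unused entry from the device's pre-allocated MSI-X pool, requests an irq named after the vdpa device and queue index, and stashes the resulting mapping in mvq->map; dealloc_vector() finds the entry again by virq and frees it. That same mapping is what mlx5_get_vq_irq() later hands to the vdpa core; queues without a vector return an error there and fall back to QP-based event delivery, per create_virtqueue() above.
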
1302 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1304 u16 idx = mvq->index; in setup_vq()
1307 if (!mvq->num_ent) in setup_vq()
1310 if (mvq->initialized) in setup_vq()
1313 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1317 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1321 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1325 err = connect_qps(ndev, mvq); in setup_vq()
1329 err = counter_set_alloc(ndev, mvq); in setup_vq()
1333 alloc_vector(ndev, mvq); in setup_vq()
1334 err = create_virtqueue(ndev, mvq); in setup_vq()
1338 if (mvq->ready) { in setup_vq()
1339 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1347 mvq->initialized = true; in setup_vq()
1351 destroy_virtqueue(ndev, mvq); in setup_vq()
1353 dealloc_vector(ndev, mvq); in setup_vq()
1354 counter_set_dealloc(ndev, mvq); in setup_vq()
1356 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1358 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
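
setup_vq() strings the pieces together in a fixed order (CQ, fw QP, vq QP, QP connection, counter set, MSI-X vector, virtqueue object, optional move to RDY) and unwinds in exactly the reverse order on failure. A minimal standalone model of the goto-unwind shape, with stub functions standing in for the driver calls and the vector and RDY steps elided:

#include <stdio.h>

/* stubs standing in for the driver calls; each returns 0 on success */
static int cq_create(void)            { return 0; }
static int qp_create_fw(void)         { return 0; }
static int qp_create_vq(void)         { return 0; }
static int connect_qps(void)          { return 0; }
static int counter_set_alloc(void)    { return 0; }
static int create_virtqueue(void)     { return 0; }
static void counter_set_dealloc(void) { puts("undo counter set"); }
static void qp_destroy_vq(void)       { puts("undo vq QP"); }
static void qp_destroy_fw(void)       { puts("undo fw QP"); }
static void cq_destroy(void)          { puts("undo CQ"); }

static int setup_vq_model(void)
{
	int err;

	err = cq_create();
	if (err)
		return err;
	err = qp_create_fw();
	if (err)
		goto err_fwqp;
	err = qp_create_vq();
	if (err)
		goto err_vqqp;
	err = connect_qps();
	if (err)
		goto err_connect;
	err = counter_set_alloc();
	if (err)
		goto err_connect;
	err = create_virtqueue();
	if (err)
		goto err_vq;
	return 0;                /* the driver also sets mvq->initialized here */

err_vq:
	counter_set_dealloc();
err_connect:
	qp_destroy_vq();
err_vqqp:
	qp_destroy_fw();
err_fwqp:
	cq_destroy();
	return err;
}

int main(void)
{
	return setup_vq_model();
}
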
1364 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1368 if (!mvq->initialized) in suspend_vq()
1371 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vq()
1374 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1377 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1381 mvq->avail_idx = attr.available_index; in suspend_vq()
1382 mvq->used_idx = attr.used_index; in suspend_vq()
1393 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1395 if (!mvq->initialized) in teardown_vq()
1398 suspend_vq(ndev, mvq); in teardown_vq()
1399 destroy_virtqueue(ndev, mvq); in teardown_vq()
1400 dealloc_vector(ndev, mvq); in teardown_vq()
1401 counter_set_dealloc(ndev, mvq); in teardown_vq()
1402 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1403 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1404 cq_destroy(ndev, mvq->index); in teardown_vq()
1405 mvq->initialized = false; in teardown_vq()
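
teardown_vq() is setup_vq() in reverse, but goes through suspend_vq() first: the object is moved to SUSPEND and then queried, so avail_idx/used_idx capture the final hardware indexes before everything is destroyed; those are the values create_virtqueue() later feeds back as hw_available_index/hw_used_index. teardown_virtqueues() and mlx5_vdpa_suspend() further down simply apply teardown_vq() and suspend_vq() across every initialized queue.
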
2089 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_kick_vq() local
2102 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2103 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
2114 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_address() local
2126 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2127 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2128 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
2129 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2137 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_num() local
2142 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2143 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2181 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_ready() local
2195 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2197 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2199 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2207 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2229 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_state() local
2239 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2240 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2245 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2246 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2254 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vq_state() local
2266 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2271 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2276 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2280 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
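
The vdpa ops in this range mostly cache values for the next object creation: set_vq_address() and set_vq_num() only record the ring addresses and depth in mvq, and kick_vq() bails out unless the queue is marked ready. For ring state, set_vq_state() rejects the call while the firmware object is RDY and writes the single split.avail_index into both used_idx and avail_idx, since the two must agree on a quiesced split queue; get_vq_state() reports the used index back as the avail index for the same reason, using the value saved at suspend when the object no longer exists.
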
2405 struct mlx5_vdpa_virtqueue *mvq; in teardown_virtqueues() local
2409 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2410 if (!mvq->initialized) in teardown_virtqueues()
2413 teardown_vq(ndev, mvq); in teardown_virtqueues()
2607 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2609 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2613 if (mvq->initialized) { in save_channel_info()
2614 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2621 ri->ready = mvq->ready; in save_channel_info()
2622 ri->num_ent = mvq->num_ent; in save_channel_info()
2623 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2624 ri->device_addr = mvq->device_addr; in save_channel_info()
2625 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2626 ri->map = mvq->map; in save_channel_info()
2652 struct mlx5_vdpa_virtqueue *mvq; in restore_channels_info() local
2659 mvq = &ndev->vqs[i]; in restore_channels_info()
2660 ri = &mvq->ri; in restore_channels_info()
2664 mvq->avail_idx = ri->avail_index; in restore_channels_info()
2665 mvq->used_idx = ri->used_index; in restore_channels_info()
2666 mvq->ready = ri->ready; in restore_channels_info()
2667 mvq->num_ent = ri->num_ent; in restore_channels_info()
2668 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
2669 mvq->device_addr = ri->device_addr; in restore_channels_info()
2670 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
2671 mvq->map = ri->map; in restore_channels_info()
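
save_channel_info() and restore_channels_info() exist because the firmware objects are destroyed and recreated wholesale when the device's memory map changes: ri snapshots each queue's readiness, depth, ring addresses, saved indexes, and MSI-X map; the queues are torn down and rebuilt; and the snapshot is copied back so the new objects resume exactly where the old ones stopped.
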
3027 struct mlx5_vdpa_virtqueue *mvq; in mlx5_get_vq_irq() local
3035 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3036 if (!mvq->map.virq) in mlx5_get_vq_irq()
3039 return mvq->map.virq; in mlx5_get_vq_irq()
3049 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
3061 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
3069 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
3087 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vendor_vq_stats() local
3107 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3108 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
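
counter_set_query() only succeeds while the object is RDY (the fw_state check above); mlx5_vdpa_get_vendor_vq_stats() uses it to surface the hardware's received_desc/completed_desc counters, which come from the per-queue counter set allocated by counter_set_alloc() and attached to the virtqueue object through counter_set_id.
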
3151 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_suspend() local
3159 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
3160 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
3269 struct mlx5_vdpa_virtqueue *mvq; in init_mvqs() local
3273 mvq = &ndev->vqs[i]; in init_mvqs()
3274 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
3275 mvq->index = i; in init_mvqs()
3276 mvq->ndev = ndev; in init_mvqs()
3277 mvq->fwqp.fw = true; in init_mvqs()
3278 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in init_mvqs()
3281 mvq = &ndev->vqs[i]; in init_mvqs()
3282 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in init_mvqs()
3283 mvq->index = i; in init_mvqs()
3284 mvq->ndev = ndev; in init_mvqs()
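
init_mvqs() clears each entry only up to offsetof(struct mlx5_vdpa_virtqueue, ri), deliberately preserving the restore snapshot across re-initialization. The first loop fully initializes the queues that can carry traffic (fwqp.fw = true, firmware state NONE); the trailing loop gives the remaining array slots just an index and a back-pointer to the net device.
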