Lines Matching refs:mvdev

26 	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
138 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_index_valid() argument
140 if (unlikely(idx > mvdev->max_idx)) in is_index_valid()
147 struct mlx5_vdpa_dev mvdev; member
168 static int setup_driver(struct mlx5_vdpa_dev *mvdev);
178 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
184 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
188 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_is_little_endian() argument
191 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
194 static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val) in mlx5vdpa16_to_cpu() argument
196 return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val); in mlx5vdpa16_to_cpu()
199 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) in cpu_to_mlx5vdpa16() argument
201 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); in cpu_to_mlx5vdpa16()
209 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev) in ctrl_vq_idx() argument
211 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
214 return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); in ctrl_vq_idx()
217 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_ctrl_vq_idx() argument
219 return idx == ctrl_vq_idx(mvdev); in is_ctrl_vq_idx()
222 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) in print_status() argument
225 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", in print_status()
231 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); in print_status()
233 mlx5_vdpa_info(mvdev, "driver resets the device\n"); in print_status()
245 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) in print_features() argument
248 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", in print_features()
254 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads"); in print_features()
256 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); in print_features()
296 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis() local
303 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
305 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); in create_tis()
312 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
325 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
326 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
342 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
343 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
348 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
394 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
408 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
410 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
422 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
424 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
429 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
435 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
448 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
464 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
474 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
484 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
498 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
499 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
500 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
502 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
545 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
571 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
572 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
604 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
620 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
641 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
648 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
652 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
656 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
662 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
688 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
715 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
723 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
725 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
761 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
798 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
846 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
852 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
864 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
868 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); in create_virtqueue()
875 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
876 if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) in create_virtqueue()
879 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
903 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
906 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
907 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
939 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
951 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
968 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
986 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1030 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1091 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1092 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1127 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1133 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1175 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1204 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1207 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1246 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in create_rqt()
1255 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1272 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1293 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in modify_rqt()
1302 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1318 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1328 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1351 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1368 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1375 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1390 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in add_fwd_to_tir()
1392 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); in add_fwd_to_tir()
1400 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_fwd_to_tir()
1421 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in add_fwd_to_tir()
1433 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in remove_fwd_to_tir()
1439 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mac() argument
1441 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac()
1442 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
1448 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
1462 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n", in handle_ctrl_mac()
1469 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n", in handle_ctrl_mac()
1485 static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) in change_num_qps() argument
1487 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps()
1521 static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mq() argument
1523 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq()
1525 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
1536 newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); in handle_ctrl_mq()
1545 if (!change_num_qps(mvdev, newqps)) in handle_ctrl_mq()
1561 struct mlx5_vdpa_dev *mvdev; in mlx5_cvq_kick_handler() local
1568 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
1569 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
1570 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
1571 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
1589 status = handle_ctrl_mac(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
1592 status = handle_ctrl_mq(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
1616 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_kick_vq() local
1617 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq()
1621 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_kick_vq()
1624 if (unlikely(is_ctrl_vq_idx(mvdev, idx))) { in mlx5_vdpa_kick_vq()
1625 if (!mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
1632 wqent->mvdev = mvdev; in mlx5_vdpa_kick_vq()
1634 queue_work(mvdev->wq, &wqent->work); in mlx5_vdpa_kick_vq()
1642 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1648 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_address() local
1649 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address()
1652 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_address()
1655 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_address()
1656 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1657 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
1658 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1671 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_num() local
1672 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num()
1675 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_set_vq_num()
1684 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_cb() local
1685 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb()
1700 static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready) in set_cvq_ready() argument
1702 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
1713 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_ready() local
1714 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready()
1717 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
1720 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_ready()
1723 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_ready()
1724 set_cvq_ready(mvdev, ready); in mlx5_vdpa_set_vq_ready()
1737 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_ready() local
1738 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready()
1740 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
1743 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
1744 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
1752 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_state() local
1753 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state()
1756 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_state()
1759 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_state()
1760 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
1766 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
1777 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_state() local
1778 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state()
1783 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_state()
1786 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_get_vq_state()
1787 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
1807 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); in mlx5_vdpa_get_vq_state()
1843 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_features() local
1844 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_features()
1847 dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask); in mlx5_vdpa_get_features()
1848 ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features); in mlx5_vdpa_get_features()
1849 if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0)) in mlx5_vdpa_get_features()
1850 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); in mlx5_vdpa_get_features()
1851 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); in mlx5_vdpa_get_features()
1852 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ); in mlx5_vdpa_get_features()
1853 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR); in mlx5_vdpa_get_features()
1854 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ); in mlx5_vdpa_get_features()
1856 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_features()
1857 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_features()
1860 static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) in verify_min_features() argument
1868 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev) in setup_virtqueues() argument
1870 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues()
1871 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_virtqueues()
1875 for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) { in setup_virtqueues()
1881 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_virtqueues()
1882 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_virtqueues()
1905 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
1914 static void update_cvq_info(struct mlx5_vdpa_dev *mvdev) in update_cvq_info() argument
1916 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) { in update_cvq_info()
1917 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) { in update_cvq_info()
1919 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
1924 mvdev->max_idx = 2; in update_cvq_info()
1928 mvdev->max_idx = 1; in update_cvq_info()
1934 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_features() local
1935 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_features()
1938 print_features(mvdev, features, true); in mlx5_vdpa_set_features()
1940 err = verify_min_features(mvdev, features); in mlx5_vdpa_set_features()
1944 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_features()
1945 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); in mlx5_vdpa_set_features()
1946 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_set_features()
1947 update_cvq_info(mvdev); in mlx5_vdpa_set_features()
1975 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_status() local
1976 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status()
1978 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
1979 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2009 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2020 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2032 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2048 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) in mlx5_vdpa_change_map() argument
2050 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map()
2059 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_change_map()
2060 err = mlx5_vdpa_create_mr(mvdev, iotlb); in mlx5_vdpa_change_map()
2064 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
2068 err = setup_driver(mvdev); in mlx5_vdpa_change_map()
2075 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_change_map()
2080 static int setup_driver(struct mlx5_vdpa_dev *mvdev) in setup_driver() argument
2082 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver()
2087 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n"); in setup_driver()
2091 err = setup_virtqueues(mvdev); in setup_driver()
2093 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n"); in setup_driver()
2099 mlx5_vdpa_warn(mvdev, "create_rqt\n"); in setup_driver()
2105 mlx5_vdpa_warn(mvdev, "create_tir\n"); in setup_driver()
2111 mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n"); in setup_driver()
2149 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2152 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2157 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_status() local
2158 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status()
2161 print_status(mvdev, status, true); in mlx5_vdpa_set_status()
2163 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2165 err = setup_driver(mvdev); in mlx5_vdpa_set_status()
2167 mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); in mlx5_vdpa_set_status()
2171 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); in mlx5_vdpa_set_status()
2176 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2180 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2181 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2186 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_reset() local
2187 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset()
2189 print_status(mvdev, 0, true); in mlx5_vdpa_reset()
2190 mlx5_vdpa_info(mvdev, "performing device reset\n"); in mlx5_vdpa_reset()
2193 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2194 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2195 ndev->mvdev.mlx_features = 0; in mlx5_vdpa_reset()
2197 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2198 ++mvdev->generation; in mlx5_vdpa_reset()
2199 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_reset()
2200 if (mlx5_vdpa_create_mr(mvdev, NULL)) in mlx5_vdpa_reset()
2201 mlx5_vdpa_warn(mvdev, "create MR failed\n"); in mlx5_vdpa_reset()
2215 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_config() local
2216 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config()
2230 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_generation() local
2232 return mvdev->generation; in mlx5_vdpa_get_generation()
2237 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_map() local
2241 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); in mlx5_vdpa_set_map()
2243 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); in mlx5_vdpa_set_map()
2248 return mlx5_vdpa_change_map(mvdev, iotlb); in mlx5_vdpa_set_map()
2255 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_free() local
2259 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2262 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_free()
2264 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
2267 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
2273 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_notification() local
2278 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_notification()
2285 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
2288 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
2289 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
2348 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
2352 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
2365 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
2377 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
2386 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { in init_mvqs()
2393 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
2412 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_dev_add() local
2432 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
2437 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
2438 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
2439 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
2457 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC); in mlx5_vdpa_dev_add()
2460 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs)); in mlx5_vdpa_dev_add()
2461 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
2462 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
2466 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
2467 err = mlx5_vdpa_create_mr(mvdev, NULL); in mlx5_vdpa_dev_add()
2476 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq"); in mlx5_vdpa_dev_add()
2477 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
2483 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
2484 err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1); in mlx5_vdpa_dev_add()
2492 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
2496 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_dev_add()
2498 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
2504 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
2511 struct mlx5_vdpa_dev *mvdev = to_mvdev(dev); in mlx5_vdpa_dev_del() local
2513 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_del()
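
The matched fragments above outline two small pieces of driver logic: the virtqueue index bookkeeping (is_index_valid() rejects indices above mvdev->max_idx, ctrl_vq_idx() places the control VQ at index 2 when VIRTIO_NET_F_MQ is not negotiated or after the data VQs when it is, and is_ctrl_vq_idx() compares against that), and the byte-order decision behind mlx5vdpa16_to_cpu()/cpu_to_mlx5vdpa16(), which treats fields as little endian once VIRTIO_F_VERSION_1 is negotiated. The following standalone C sketch models that logic as reconstructed from the matched lines only; the struct layout, the max_qps() stand-in (assuming mlx5_vdpa_max_qps() halves max_vqs), the host-endianness probe used in place of virtio_legacy_is_little_endian(), and main() are illustrative assumptions, not the kernel's definitions.

	/* Standalone model of the index and byte-order checks seen in the
	 * listing above. Types and helpers are simplified stand-ins, not
	 * the kernel definitions. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n)		(1ULL << (n))
	#define VIRTIO_NET_F_MQ		22	/* multiqueue feature bit (virtio spec) */
	#define VIRTIO_F_VERSION_1	32	/* modern-interface feature bit (virtio spec) */

	struct mvdev_model {
		uint64_t actual_features;	/* features negotiated by the guest driver */
		uint16_t max_vqs;		/* data virtqueues supported by the device */
		uint16_t max_idx;		/* highest index the driver will accept */
	};

	/* Stand-in for mlx5_vdpa_max_qps(): data VQs form RX/TX pairs. */
	static uint16_t max_qps(uint16_t max_vqs)
	{
		return max_vqs / 2;
	}

	/* Mirrors ctrl_vq_idx(): the control VQ sits right after the data VQs,
	 * or at index 2 when multiqueue was not negotiated. */
	static uint16_t ctrl_vq_idx(const struct mvdev_model *mvdev)
	{
		if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
			return 2;
		return 2 * max_qps(mvdev->max_vqs);
	}

	/* Mirrors is_index_valid() / is_ctrl_vq_idx(). */
	static bool is_index_valid(const struct mvdev_model *mvdev, uint16_t idx)
	{
		return idx <= mvdev->max_idx;
	}

	static bool is_ctrl_vq_idx(const struct mvdev_model *mvdev, uint16_t idx)
	{
		return idx == ctrl_vq_idx(mvdev);
	}

	/* Simplification: probe host byte order where the kernel consults
	 * virtio_legacy_is_little_endian(). */
	static bool host_is_little_endian(void)
	{
		const uint16_t probe = 1;

		return *(const uint8_t *)&probe == 1;
	}

	/* Mirrors mlx5_vdpa_is_little_endian(): config and vring fields are
	 * little endian once VIRTIO_F_VERSION_1 has been negotiated. */
	static bool is_little_endian(const struct mvdev_model *mvdev)
	{
		return host_is_little_endian() ||
		       (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
	}

	int main(void)
	{
		struct mvdev_model mvdev = {
			.actual_features = BIT_ULL(VIRTIO_NET_F_MQ) |
					   BIT_ULL(VIRTIO_F_VERSION_1),
			.max_vqs = 16,
			.max_idx = 16,	/* 16 data VQs, control VQ at index 16 */
		};

		printf("ctrl vq idx: %u\n", (unsigned)ctrl_vq_idx(&mvdev));
		printf("idx 3: valid=%d ctrl=%d\n",
		       is_index_valid(&mvdev, 3), is_ctrl_vq_idx(&mvdev, 3));
		printf("idx 16: valid=%d ctrl=%d\n",
		       is_index_valid(&mvdev, 16), is_ctrl_vq_idx(&mvdev, 16));
		printf("little endian fields: %d\n", is_little_endian(&mvdev));
		return 0;
	}

With VIRTIO_NET_F_MQ set the model reports the control VQ at index 16 and accepts indices 0..16; without it, only indices 0..2 would be meaningful and the control VQ would sit at index 2, which matches the max_idx values seen in the update_cvq_info() lines of the listing.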