Lines Matching refs:mvdev

27 	container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
137 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_index_valid() argument
139 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
140 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
146 return idx <= mvdev->max_idx; in is_index_valid()
152 struct mlx5_vdpa_dev mvdev; member
183 static int setup_driver(struct mlx5_vdpa_dev *mvdev);
193 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
199 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
203 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_is_little_endian() argument
206 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
209 static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val) in mlx5vdpa16_to_cpu() argument
211 return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val); in mlx5vdpa16_to_cpu()
214 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val) in cpu_to_mlx5vdpa16() argument
216 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val); in cpu_to_mlx5vdpa16()
219 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev) in ctrl_vq_idx() argument
221 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
224 return mvdev->max_vqs; in ctrl_vq_idx()
227 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx) in is_ctrl_vq_idx() argument
229 return idx == ctrl_vq_idx(mvdev); in is_ctrl_vq_idx()
232 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) in print_status() argument
235 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", in print_status()
241 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); in print_status()
243 mlx5_vdpa_info(mvdev, "driver resets the device\n"); in print_status()
255 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) in print_features() argument
258 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", in print_features()
264 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads"); in print_features()
266 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); in print_features()
306 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis() local
313 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
315 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); in create_tis()
322 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
335 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
336 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
352 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
353 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
358 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
404 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
418 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
420 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
432 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
434 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
439 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
445 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
458 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
474 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
484 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
494 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
508 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
509 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
510 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
512 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
555 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
581 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
582 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
614 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
630 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
651 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
658 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
662 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
666 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
672 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
698 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
725 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
733 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
735 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
771 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
808 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
832 static bool counters_supported(const struct mlx5_vdpa_dev *mvdev) in counters_supported() argument
834 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
862 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
868 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
880 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
884 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); in create_virtqueue()
891 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
892 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
895 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
920 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
923 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
924 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
957 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
969 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
986 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1004 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1048 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1109 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1110 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1165 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1171 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1186 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1193 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1195 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1209 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1214 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1216 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1217 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1258 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1289 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1292 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1303 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1337 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1347 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1372 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1382 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1392 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1415 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1432 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1439 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1611 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1613 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1619 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1640 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mac() argument
1642 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac()
1643 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
1649 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
1666 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n", in handle_ctrl_mac()
1673 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n", in handle_ctrl_mac()
1690 mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n"); in handle_ctrl_mac()
1694 mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n"); in handle_ctrl_mac()
1702 mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n", in handle_ctrl_mac()
1707 mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n", in handle_ctrl_mac()
1714 mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n"); in handle_ctrl_mac()
1729 static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps) in change_num_qps() argument
1731 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps()
1767 static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_mq() argument
1769 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq()
1771 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
1787 if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) in handle_ctrl_mq()
1794 newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs); in handle_ctrl_mq()
1804 if (!change_num_qps(mvdev, newqps)) in handle_ctrl_mq()
1815 static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) in handle_ctrl_vlan() argument
1817 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan()
1819 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
1830 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
1841 id = mlx5vdpa16_to_cpu(mvdev, vlan); in handle_ctrl_vlan()
1857 struct mlx5_vdpa_dev *mvdev; in mlx5_cvq_kick_handler() local
1864 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
1865 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
1866 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
1870 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
1873 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
1892 status = handle_ctrl_mac(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
1895 status = handle_ctrl_mq(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
1898 status = handle_ctrl_vlan(mvdev, ctrl.cmd); in mlx5_cvq_kick_handler()
1916 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
1926 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_kick_vq() local
1927 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq()
1930 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_kick_vq()
1933 if (unlikely(is_ctrl_vq_idx(mvdev, idx))) { in mlx5_vdpa_kick_vq()
1934 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
1937 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
1945 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1951 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_address() local
1952 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address()
1955 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_address()
1958 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_address()
1959 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
1960 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
1961 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
1974 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_num() local
1975 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num()
1978 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_set_vq_num()
1987 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_cb() local
1988 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb()
1991 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_set_vq_cb()
1992 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2005 static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready) in set_cvq_ready() argument
2007 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2018 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_ready() local
2019 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready()
2023 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2026 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_ready()
2029 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_ready()
2030 set_cvq_ready(mvdev, ready); in mlx5_vdpa_set_vq_ready()
2040 mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err); in mlx5_vdpa_set_vq_ready()
2051 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_ready() local
2052 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready()
2054 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2057 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_ready()
2058 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2066 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_vq_state() local
2067 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state()
2070 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_set_vq_state()
2073 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_set_vq_state()
2074 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2080 mlx5_vdpa_warn(mvdev, "can't modify available index\n"); in mlx5_vdpa_set_vq_state()
2091 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_state() local
2092 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state()
2097 if (!is_index_valid(mvdev, idx)) in mlx5_vdpa_get_vq_state()
2100 if (is_ctrl_vq_idx(mvdev, idx)) { in mlx5_vdpa_get_vq_state()
2101 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2121 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); in mlx5_vdpa_get_vq_state()
2135 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vq_group() local
2137 if (is_ctrl_vq_idx(mvdev, idx)) in mlx5_vdpa_get_vq_group()
2187 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_device_features() local
2188 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features()
2190 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2191 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2194 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) in verify_driver_features() argument
2216 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev) in setup_virtqueues() argument
2218 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues()
2222 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2242 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2251 static void update_cvq_info(struct mlx5_vdpa_dev *mvdev) in update_cvq_info() argument
2253 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) { in update_cvq_info()
2254 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) { in update_cvq_info()
2256 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2261 mvdev->max_idx = 2; in update_cvq_info()
2265 mvdev->max_idx = 1; in update_cvq_info()
2271 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_driver_features() local
2272 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features()
2275 print_features(mvdev, features, true); in mlx5_vdpa_set_driver_features()
2277 err = verify_driver_features(mvdev, features); in mlx5_vdpa_set_driver_features()
2281 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2282 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2283 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2289 update_cvq_info(mvdev); in mlx5_vdpa_set_driver_features()
2295 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_config_cb() local
2296 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb()
2319 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_status() local
2320 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status()
2322 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2323 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2353 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2364 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2376 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2392 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) in mlx5_vdpa_change_map() argument
2394 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map()
2403 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_change_map()
2404 err = mlx5_vdpa_create_mr(mvdev, iotlb); in mlx5_vdpa_change_map()
2408 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
2412 err = setup_driver(mvdev); in mlx5_vdpa_change_map()
2419 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_change_map()
2425 static int setup_driver(struct mlx5_vdpa_dev *mvdev) in setup_driver() argument
2427 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver()
2433 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n"); in setup_driver()
2437 err = setup_virtqueues(mvdev); in setup_driver()
2439 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n"); in setup_driver()
2445 mlx5_vdpa_warn(mvdev, "create_rqt\n"); in setup_driver()
2451 mlx5_vdpa_warn(mvdev, "create_tir\n"); in setup_driver()
2457 mlx5_vdpa_warn(mvdev, "setup_steering\n"); in setup_driver()
2494 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2497 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2500 static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev) in setup_cvq_vring() argument
2502 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
2505 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) in setup_cvq_vring()
2506 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
2517 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_status() local
2518 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status()
2521 print_status(mvdev, status, true); in mlx5_vdpa_set_status()
2525 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2527 err = setup_cvq_vring(mvdev); in mlx5_vdpa_set_status()
2529 mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n"); in mlx5_vdpa_set_status()
2532 err = setup_driver(mvdev); in mlx5_vdpa_set_status()
2534 mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); in mlx5_vdpa_set_status()
2538 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); in mlx5_vdpa_set_status()
2543 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2548 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2549 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2554 static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev) in init_group_to_asid_map() argument
2560 mvdev->group2asid[i] = 0; in init_group_to_asid_map()
2565 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_reset() local
2566 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset()
2568 print_status(mvdev, 0, true); in mlx5_vdpa_reset()
2569 mlx5_vdpa_info(mvdev, "performing device reset\n"); in mlx5_vdpa_reset()
2574 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2575 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2577 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2578 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2579 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2580 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2581 init_group_to_asid_map(mvdev); in mlx5_vdpa_reset()
2582 ++mvdev->generation; in mlx5_vdpa_reset()
2584 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_reset()
2585 if (mlx5_vdpa_create_mr(mvdev, NULL)) in mlx5_vdpa_reset()
2586 mlx5_vdpa_warn(mvdev, "create MR failed\n"); in mlx5_vdpa_reset()
2601 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_config() local
2602 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config()
2616 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_generation() local
2618 return mvdev->generation; in mlx5_vdpa_get_generation()
2621 static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) in set_map_control() argument
2627 spin_lock(&mvdev->cvq.iommu_lock); in set_map_control()
2628 vhost_iotlb_reset(mvdev->cvq.iotlb); in set_map_control()
2632 err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, in set_map_control()
2639 spin_unlock(&mvdev->cvq.iommu_lock); in set_map_control()
2643 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) in set_map_data() argument
2648 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); in set_map_data()
2650 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); in set_map_data()
2655 err = mlx5_vdpa_change_map(mvdev, iotlb); in set_map_data()
2663 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_set_map() local
2664 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map()
2668 if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { in mlx5_vdpa_set_map()
2669 err = set_map_data(mvdev, iotlb); in mlx5_vdpa_set_map()
2674 if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) in mlx5_vdpa_set_map()
2675 err = set_map_control(mvdev, iotlb); in mlx5_vdpa_set_map()
2684 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_free() local
2688 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2691 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_free()
2693 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
2696 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
2703 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_get_vq_notification() local
2708 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) in mlx5_get_vq_notification()
2715 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
2718 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
2719 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
2732 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_driver_features() local
2734 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
2746 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
2756 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
2759 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
2773 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_get_vendor_vq_stats() local
2774 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats()
2782 if (!is_index_valid(mvdev, idx)) { in mlx5_vdpa_get_vendor_vq_stats()
2788 if (idx == ctrl_vq_idx(mvdev)) { in mlx5_vdpa_get_vendor_vq_stats()
2789 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
2824 static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev) in mlx5_vdpa_cvq_suspend() argument
2828 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
2831 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
2837 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_vdpa_suspend() local
2838 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend()
2843 mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); in mlx5_vdpa_suspend()
2845 flush_workqueue(ndev->mvdev.wq); in mlx5_vdpa_suspend()
2850 mlx5_vdpa_cvq_suspend(mvdev); in mlx5_vdpa_suspend()
2858 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); in mlx5_set_group_asid() local
2863 mvdev->group2asid[group] = asid; in mlx5_set_group_asid()
2920 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
2924 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
2937 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
2949 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
2958 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
2966 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
2999 static bool get_link_state(struct mlx5_vdpa_dev *mvdev) in get_link_state() argument
3001 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
3011 struct mlx5_vdpa_dev *mvdev; in update_carrier() local
3015 mvdev = wqent->mvdev; in update_carrier()
3016 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
3017 if (get_link_state(mvdev)) in update_carrier()
3018 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
3020 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
3050 wqent->mvdev = &ndev->mvdev; in event_handler()
3052 queue_work(ndev->mvdev.wq, &wqent->work); in event_handler()
3092 struct mlx5_vdpa_dev *mvdev; in mlx5_vdpa_dev_add() local
3126 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
3131 ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features; in mlx5_vdpa_dev_add()
3132 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3133 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3134 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3157 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3159 if (get_link_state(mvdev)) in mlx5_vdpa_dev_add()
3160 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3162 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3178 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC); in mlx5_vdpa_dev_add()
3181 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3182 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3183 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3187 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3188 err = mlx5_vdpa_create_mr(mvdev, NULL); in mlx5_vdpa_dev_add()
3197 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3199 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3200 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3208 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3209 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
3217 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
3221 mlx5_vdpa_destroy_mr(mvdev); in mlx5_vdpa_dev_add()
3223 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3228 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
3235 struct mlx5_vdpa_dev *mvdev = to_mvdev(dev); in mlx5_vdpa_dev_del() local
3236 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del()
3240 mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); in mlx5_vdpa_dev_del()
3243 wq = mvdev->wq; in mlx5_vdpa_dev_del()
3244 mvdev->wq = NULL; in mlx5_vdpa_dev_del()
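
Note: the pattern that dominates this listing is the embedding of struct mlx5_vdpa_dev inside struct mlx5_vdpa_net (the "member" hit at line 152) together with the container_of() conversion macro (line 27), which is why nearly every callback above moves between mvdev and ndev. The following is a minimal, self-contained sketch of that embedding/container_of pattern only, not the driver source: the struct field lists and the main() driver are illustrative placeholders, and only the conversion idiom mirrors what the listing shows.

	/*
	 * Stand-alone sketch of the mvdev-in-ndev embedding pattern.
	 * Compiles in userspace; field sets are trimmed to placeholders.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mlx5_vdpa_dev {
		unsigned long long actual_features;	/* negotiated feature bits */
		unsigned int max_vqs;			/* data virtqueue count */
	};

	struct mlx5_vdpa_net {
		struct mlx5_vdpa_dev mvdev;		/* embedded core vdpa device */
		int rqt_size;				/* illustrative net-specific state */
	};

	/* Same idea as the driver's to_mlx5_vdpa_ndev() macro at listing line 27. */
	static struct mlx5_vdpa_net *to_mlx5_vdpa_ndev(struct mlx5_vdpa_dev *mvdev)
	{
		return container_of(mvdev, struct mlx5_vdpa_net, mvdev);
	}

	int main(void)
	{
		struct mlx5_vdpa_net ndev = { .mvdev = { .max_vqs = 16 }, .rqt_size = 8 };
		struct mlx5_vdpa_dev *mvdev = &ndev.mvdev;

		/* Callbacks receive the inner mvdev and climb back to the outer ndev. */
		struct mlx5_vdpa_net *back = to_mlx5_vdpa_ndev(mvdev);

		printf("max_vqs=%u rqt_size=%d same=%d\n",
		       back->mvdev.max_vqs, back->rqt_size, back == &ndev);
		return 0;
	}

Embedding the generic device in the net-specific wrapper keeps both in one allocation and avoids back-pointers: callbacks that are handed the inner mvdev recover the outer ndev purely by pointer arithmetic, which is the conversion seen throughout the matches above.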