Lines matching full:msg (full-text search hits for the identifier msg in drivers/infiniband/core/nldev.c, the RDMA netlink device code)
161 static int put_driver_name_print_type(struct sk_buff *msg, const char *name, in put_driver_name_print_type() argument
164 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) in put_driver_name_print_type()
167 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) in put_driver_name_print_type()
173 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u32() argument
177 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u32()
179 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) in _rdma_nl_put_driver_u32()
185 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u64() argument
189 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u64()
191 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, in _rdma_nl_put_driver_u64()
198 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_string() argument
201 if (put_driver_name_print_type(msg, name, in rdma_nl_put_driver_string()
204 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) in rdma_nl_put_driver_string()
211 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) in rdma_nl_put_driver_u32() argument
213 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u32()
218 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_u32_hex() argument
221 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u32_hex()
226 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64() argument
228 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u64()
233 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64_hex() argument
235 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u64_hex()
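The five exported rdma_nl_put_driver_* wrappers above (lines 198-235) exist for driver use: a device driver's fill_res_*_entry callback can call them to attach named, vendor-specific values to a resource dump. A minimal sketch of such a callback, assuming a hypothetical my_qp structure whose counter fields are not part of this listing:

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>

/* Hypothetical driver QP with two vendor counters (illustrative only). */
struct my_qp {
        struct ib_qp ibqp;
        u32 sq_wqe_cnt;
        u32 doorbell_off;
};

static int my_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
        struct my_qp *qp = container_of(ibqp, struct my_qp, ibqp);

        /* Each helper emits the name/print-type pair followed by the value
         * attribute; a nonzero return means the skb has run out of room. */
        if (rdma_nl_put_driver_u32(msg, "sq_wqe_cnt", qp->sq_wqe_cnt))
                return -EMSGSIZE;
        if (rdma_nl_put_driver_u32_hex(msg, "doorbell_off", qp->doorbell_off))
                return -EMSGSIZE;
        return 0;
}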
240 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) in fill_nldev_handle() argument
242 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) in fill_nldev_handle()
244 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, in fill_nldev_handle()
251 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) in fill_dev_info() argument
257 if (fill_nldev_handle(msg, device)) in fill_dev_info()
260 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) in fill_dev_info()
264 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_dev_info()
271 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) in fill_dev_info()
274 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, in fill_dev_info()
278 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, in fill_dev_info()
282 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) in fill_dev_info()
284 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) in fill_dev_info()
294 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); in fill_dev_info()
296 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); in fill_dev_info()
298 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); in fill_dev_info()
300 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); in fill_dev_info()
302 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, in fill_dev_info()
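Lines 294-302 attach exactly one protocol string per device. The listing shows only the nla_put_string() calls, not the conditions guarding them; a plausible reconstruction of the selection, assuming the standard capability predicates from <rdma/ib_verbs.h> (the helper name is invented, and the truncated final case at line 302 is omitted):

#include <net/netlink.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_netlink.h>

/* Illustrative reconstruction; fill_dev_info() inlines this logic. */
static int fill_dev_protocol(struct sk_buff *msg, struct ib_device *device)
{
        u32 port = rdma_start_port(device);

        if (rdma_cap_opa_mad(device, port))
                return nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
        if (rdma_protocol_ib(device, port))
                return nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
        if (rdma_protocol_iwarp(device, port))
                return nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
        if (rdma_protocol_roce(device, port))
                return nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
        return 0;
}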
307 static int fill_port_info(struct sk_buff *msg, in fill_port_info() argument
316 if (fill_nldev_handle(msg, device)) in fill_port_info()
319 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in fill_port_info()
331 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_port_info()
334 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, in fill_port_info()
337 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) in fill_port_info()
339 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) in fill_port_info()
341 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) in fill_port_info()
344 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) in fill_port_info()
346 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) in fill_port_info()
351 ret = nla_put_u32(msg, in fill_port_info()
355 ret = nla_put_string(msg, in fill_port_info()
365 static int fill_res_info_entry(struct sk_buff *msg, in fill_res_info_entry() argument
370 entry_attr = nla_nest_start_noflag(msg, in fill_res_info_entry()
375 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) in fill_res_info_entry()
377 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, in fill_res_info_entry()
381 nla_nest_end(msg, entry_attr); in fill_res_info_entry()
385 nla_nest_cancel(msg, entry_attr); in fill_res_info_entry()
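fill_res_info_entry() (lines 370-385) is the canonical nest-or-cancel shape this file uses everywhere: open a nested attribute, emit its members, then either commit with nla_nest_end() or roll the partial nest back with nla_nest_cancel() once the skb runs out of room. A self-contained sketch of the pattern, built from the attribute constants visible in the listing (the helper name is illustrative):

#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

static int fill_summary_entry(struct sk_buff *msg, const char *name, u64 curr)
{
        struct nlattr *entry_attr;

        entry_attr = nla_nest_start_noflag(msg,
                                           RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;

        if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
                goto err;
        if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR,
                              curr, RDMA_NLDEV_ATTR_PAD))
                goto err;

        nla_nest_end(msg, entry_attr);    /* commit the nested attribute */
        return 0;

err:
        nla_nest_cancel(msg, entry_attr); /* trim the partial nest from the skb */
        return -EMSGSIZE;
}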
389 static int fill_res_info(struct sk_buff *msg, struct ib_device *device) in fill_res_info() argument
404 if (fill_nldev_handle(msg, device)) in fill_res_info()
407 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); in fill_res_info()
415 ret = fill_res_info_entry(msg, names[i], curr); in fill_res_info()
420 nla_nest_end(msg, table_attr); in fill_res_info()
424 nla_nest_cancel(msg, table_attr); in fill_res_info()
428 static int fill_res_name_pid(struct sk_buff *msg, in fill_res_name_pid() argument
438 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, in fill_res_name_pid()
455 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); in fill_res_name_pid()
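fill_res_name_pid() (lines 438 and 455) reports resource ownership in one of two ways: kernel-owned resources carry a human-readable name, user-owned ones a PID. A sketch of that branch, assuming the restrack entry exposes kern_name and task fields as in mainline:

#include <linux/sched.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>
#include <rdma/restrack.h>

static int fill_res_name_pid_sketch(struct sk_buff *msg,
                                    struct rdma_restrack_entry *res)
{
        int err;

        if (rdma_is_kernel_res(res)) {
                /* Kernel consumers are identified by name, not PID. */
                err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
                                     res->kern_name);
        } else {
                /* PID as seen from the reader's pid namespace; zero means
                 * the owning task has already exited. */
                pid_t pid = task_pid_vnr(res->task);

                if (!pid)
                        return -EINVAL;
                err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
        }

        return err ? -EMSGSIZE : 0;
}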
461 static int fill_res_qp_entry_query(struct sk_buff *msg, in fill_res_qp_entry_query() argument
475 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, in fill_res_qp_entry_query()
478 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, in fill_res_qp_entry_query()
483 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) in fill_res_qp_entry_query()
488 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, in fill_res_qp_entry_query()
492 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) in fill_res_qp_entry_query()
494 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) in fill_res_qp_entry_query()
498 return dev->ops.fill_res_qp_entry(msg, qp); in fill_res_qp_entry_query()
504 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_entry() argument
515 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) in fill_res_qp_entry()
518 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); in fill_res_qp_entry()
523 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) in fill_res_qp_entry()
526 ret = fill_res_name_pid(msg, res); in fill_res_qp_entry()
530 return fill_res_qp_entry_query(msg, res, dev, qp); in fill_res_qp_entry()
533 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_raw_entry() argument
543 return dev->ops.fill_res_qp_entry_raw(msg, qp); in fill_res_qp_raw_entry()
546 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cm_id_entry() argument
558 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) in fill_res_cm_id_entry()
562 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) in fill_res_cm_id_entry()
564 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) in fill_res_cm_id_entry()
568 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) in fill_res_cm_id_entry()
571 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) in fill_res_cm_id_entry()
575 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, in fill_res_cm_id_entry()
580 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, in fill_res_cm_id_entry()
585 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) in fill_res_cm_id_entry()
588 if (fill_res_name_pid(msg, res)) in fill_res_cm_id_entry()
592 return dev->ops.fill_res_cm_id_entry(msg, cm_id); in fill_res_cm_id_entry()
598 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_entry() argument
604 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) in fill_res_cq_entry()
606 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_cq_entry()
612 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) in fill_res_cq_entry()
615 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) in fill_res_cq_entry()
618 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) in fill_res_cq_entry()
621 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_cq_entry()
625 if (fill_res_name_pid(msg, res)) in fill_res_cq_entry()
629 dev->ops.fill_res_cq_entry(msg, cq) : 0; in fill_res_cq_entry()
632 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_raw_entry() argument
640 return dev->ops.fill_res_cq_entry_raw(msg, cq); in fill_res_cq_raw_entry()
643 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_entry() argument
650 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) in fill_res_mr_entry()
652 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) in fill_res_mr_entry()
656 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, in fill_res_mr_entry()
660 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_res_mr_entry()
664 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) in fill_res_mr_entry()
667 if (fill_res_name_pid(msg, res)) in fill_res_mr_entry()
671 dev->ops.fill_res_mr_entry(msg, mr) : in fill_res_mr_entry()
675 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_raw_entry() argument
683 return dev->ops.fill_res_mr_entry_raw(msg, mr); in fill_res_mr_raw_entry()
686 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_pd_entry() argument
692 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, in fill_res_pd_entry()
696 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, in fill_res_pd_entry()
700 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_pd_entry()
704 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) in fill_res_pd_entry()
708 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_pd_entry()
712 return fill_res_name_pid(msg, res); in fill_res_pd_entry()
717 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_ctx_entry() argument
725 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) in fill_res_ctx_entry()
728 return fill_res_name_pid(msg, res); in fill_res_ctx_entry()
731 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, in fill_res_range_qp_entry() argument
739 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_res_range_qp_entry()
744 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) in fill_res_range_qp_entry()
747 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) in fill_res_range_qp_entry()
749 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) in fill_res_range_qp_entry()
752 nla_nest_end(msg, entry_attr); in fill_res_range_qp_entry()
756 nla_nest_cancel(msg, entry_attr); in fill_res_range_qp_entry()
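Note the mix of nest helpers in this listing: fill_res_info_entry() used nla_nest_start_noflag(), while fill_res_range_qp_entry() (line 739) and the later functions use nla_nest_start(). The only difference is the NLA_F_NESTED flag, which mainline <net/netlink.h> defines as:

/* nla_nest_start() is nla_nest_start_noflag() with NLA_F_NESTED or'ed
 * into the attribute type, so strict parsers can verify the attribute
 * really is a nest. */
static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
        return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
}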
760 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) in fill_res_srq_qps() argument
769 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_res_srq_qps()
792 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
803 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
806 nla_nest_end(msg, table_attr); in fill_res_srq_qps()
813 nla_nest_cancel(msg, table_attr); in fill_res_srq_qps()
817 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_entry() argument
822 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) in fill_res_srq_entry()
825 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) in fill_res_srq_entry()
828 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) in fill_res_srq_entry()
832 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, in fill_res_srq_entry()
837 if (fill_res_srq_qps(msg, srq)) in fill_res_srq_entry()
840 return fill_res_name_pid(msg, res); in fill_res_srq_entry()
846 static int fill_stat_counter_mode(struct sk_buff *msg, in fill_stat_counter_mode() argument
851 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) in fill_stat_counter_mode()
856 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) in fill_stat_counter_mode()
860 fill_res_name_pid(msg, &counter->res)) in fill_stat_counter_mode()
867 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) in fill_stat_counter_qp_entry() argument
871 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_stat_counter_qp_entry()
875 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) in fill_stat_counter_qp_entry()
878 nla_nest_end(msg, entry_attr); in fill_stat_counter_qp_entry()
882 nla_nest_cancel(msg, entry_attr); in fill_stat_counter_qp_entry()
886 static int fill_stat_counter_qps(struct sk_buff *msg, in fill_stat_counter_qps() argument
896 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_stat_counter_qps()
905 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); in fill_stat_counter_qps()
911 nla_nest_end(msg, table_attr); in fill_stat_counter_qps()
916 nla_nest_cancel(msg, table_attr); in fill_stat_counter_qps()
920 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, in rdma_nl_stat_hwcounter_entry() argument
925 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); in rdma_nl_stat_hwcounter_entry()
929 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, in rdma_nl_stat_hwcounter_entry()
932 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, in rdma_nl_stat_hwcounter_entry()
936 nla_nest_end(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
940 nla_nest_cancel(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
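rdma_nl_stat_hwcounter_entry() (lines 920-940) is exported so drivers can emit hardware counters from their fill_stat_* callbacks; each call produces one name/value nest inside a RDMA_NLDEV_ATTR_STAT_HWCOUNTERS table opened by the caller. A hypothetical driver callback, with the my_mr type and page_faults counter invented for illustration:

#include <net/netlink.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_netlink.h>

/* Hypothetical driver MR with one hardware counter (illustrative only). */
struct my_mr {
        struct ib_mr ibmr;
        u64 page_faults;
};

static int my_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
        struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
        struct nlattr *table_attr;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table_attr)
                return -EMSGSIZE;

        /* One RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY nest per counter. */
        if (rdma_nl_stat_hwcounter_entry(msg, "page_faults", mr->page_faults))
                goto err;

        nla_nest_end(msg, table_attr);
        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return -EMSGSIZE;
}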
945 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_stat_mr_entry() argument
951 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_stat_mr_entry()
955 return dev->ops.fill_stat_mr_entry(msg, mr); in fill_stat_mr_entry()
962 static int fill_stat_counter_hwcounters(struct sk_buff *msg, in fill_stat_counter_hwcounters() argument
969 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in fill_stat_counter_hwcounters()
977 if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, in fill_stat_counter_hwcounters()
983 nla_nest_end(msg, table_attr); in fill_stat_counter_hwcounters()
988 nla_nest_cancel(msg, table_attr); in fill_stat_counter_hwcounters()
992 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_counter_entry() argument
1005 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || in fill_res_counter_entry()
1006 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || in fill_res_counter_entry()
1007 fill_stat_counter_mode(msg, counter) || in fill_res_counter_entry()
1008 fill_stat_counter_qps(msg, counter) || in fill_res_counter_entry()
1009 fill_stat_counter_hwcounters(msg, counter)) in fill_res_counter_entry()
1020 struct sk_buff *msg; in nldev_get_doit() local
1035 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_doit()
1036 if (!msg) { in nldev_get_doit()
1041 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_doit()
1045 err = fill_dev_info(msg, device); in nldev_get_doit()
1049 nlmsg_end(msg, nlh); in nldev_get_doit()
1052 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_doit()
1055 nlmsg_free(msg); in nldev_get_doit()
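nldev_get_doit() (lines 1020-1055) shows the reply shape every later *_doit handler in this listing repeats: allocate the reply skb, add a netlink header, fill the payload, finalize, unicast. nlmsg_free() appears only on the error path because rdma_nl_unicast() consumes the skb. A condensed sketch, assuming the caller has already parsed the request and looked up (and referenced) device:

#include <net/netlink.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>

static int nldev_reply_dev_info(struct sk_buff *skb, struct nlmsghdr *inh,
                                struct ib_device *device)
{
        struct nlmsghdr *nlh;
        struct sk_buff *msg;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, inh->nlmsg_seq,
                        RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
                        0, 0);
        if (!nlh || fill_dev_info(msg, device)) {
                nlmsg_free(msg);        /* freed only on the error path */
                return -EMSGSIZE;
        }

        nlmsg_end(msg, nlh);
        /* rdma_nl_unicast() takes ownership of msg from here on. */
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}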
1156 struct sk_buff *msg; in nldev_port_get_doit() local
1179 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_port_get_doit()
1180 if (!msg) { in nldev_port_get_doit()
1185 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_port_get_doit()
1189 err = fill_port_info(msg, device, port, sock_net(skb->sk)); in nldev_port_get_doit()
1193 nlmsg_end(msg, nlh); in nldev_port_get_doit()
1196 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_port_get_doit()
1199 nlmsg_free(msg); in nldev_port_get_doit()
1268 struct sk_buff *msg; in nldev_res_get_doit() local
1282 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_res_get_doit()
1283 if (!msg) { in nldev_res_get_doit()
1288 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_res_get_doit()
1292 ret = fill_res_info(msg, device); in nldev_res_get_doit()
1296 nlmsg_end(msg, nlh); in nldev_res_get_doit()
1298 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_res_get_doit()
1301 nlmsg_free(msg); in nldev_res_get_doit()
1412 struct sk_buff *msg; in res_get_common_doit() local
1446 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in res_get_common_doit()
1447 if (!msg) { in res_get_common_doit()
1452 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in res_get_common_doit()
1457 if (fill_nldev_handle(msg, device)) { in res_get_common_doit()
1464 ret = fill_func(msg, has_cap_net_admin, res, port); in res_get_common_doit()
1469 nlmsg_end(msg, nlh); in res_get_common_doit()
1471 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in res_get_common_doit()
1474 nlmsg_free(msg); in res_get_common_doit()
1758 struct sk_buff *msg; in nldev_get_chardev() local
1789 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_chardev()
1790 if (!msg) { in nldev_get_chardev()
1794 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_chardev()
1799 data.nl_msg = msg; in nldev_get_chardev()
1804 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, in nldev_get_chardev()
1809 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, in nldev_get_chardev()
1813 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, in nldev_get_chardev()
1819 nlmsg_end(msg, nlh); in nldev_get_chardev()
1823 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_chardev()
1828 nlmsg_free(msg); in nldev_get_chardev()
1839 struct sk_buff *msg; in nldev_sys_get_doit() local
1847 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_sys_get_doit()
1848 if (!msg) in nldev_sys_get_doit()
1851 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_sys_get_doit()
1856 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, in nldev_sys_get_doit()
1859 nlmsg_free(msg); in nldev_sys_get_doit()
1873 nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); in nldev_sys_get_doit()
1875 nlmsg_end(msg, nlh); in nldev_sys_get_doit()
1876 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_sys_get_doit()
1900 static int nldev_stat_set_mode_doit(struct sk_buff *msg, in nldev_stat_set_mode_doit() argument
1936 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_set_mode_doit()
1937 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_set_mode_doit()
1999 struct sk_buff *msg; in nldev_stat_set_doit() local
2026 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_set_doit()
2027 if (!msg) { in nldev_stat_set_doit()
2031 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_set_doit()
2035 if (fill_nldev_handle(msg, device) || in nldev_stat_set_doit()
2036 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in nldev_stat_set_doit()
2042 ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); in nldev_stat_set_doit()
2053 nlmsg_end(msg, nlh); in nldev_stat_set_doit()
2055 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_set_doit()
2058 nlmsg_free(msg); in nldev_stat_set_doit()
2069 struct sk_buff *msg; in nldev_stat_del_doit() local
2095 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_del_doit()
2096 if (!msg) { in nldev_stat_del_doit()
2100 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_del_doit()
2107 if (fill_nldev_handle(msg, device) || in nldev_stat_del_doit()
2108 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in nldev_stat_del_doit()
2109 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_del_doit()
2110 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_del_doit()
2119 nlmsg_end(msg, nlh); in nldev_stat_del_doit()
2121 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_del_doit()
2124 nlmsg_free(msg); in nldev_stat_del_doit()
2139 struct sk_buff *msg; in stat_get_doit_default_counter() local
2163 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_default_counter()
2164 if (!msg) { in stat_get_doit_default_counter()
2169 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_default_counter()
2174 if (fill_nldev_handle(msg, device) || in stat_get_doit_default_counter()
2175 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in stat_get_doit_default_counter()
2188 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in stat_get_doit_default_counter()
2199 if (rdma_nl_stat_hwcounter_entry(msg, in stat_get_doit_default_counter()
2205 nla_nest_end(msg, table_attr); in stat_get_doit_default_counter()
2208 nlmsg_end(msg, nlh); in stat_get_doit_default_counter()
2210 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_default_counter()
2213 nla_nest_cancel(msg, table_attr); in stat_get_doit_default_counter()
2217 nlmsg_free(msg); in stat_get_doit_default_counter()
2230 struct sk_buff *msg; in stat_get_doit_qp() local
2252 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_qp()
2253 if (!msg) { in stat_get_doit_qp()
2258 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_qp()
2267 if (fill_nldev_handle(msg, device) || in stat_get_doit_qp()
2268 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in stat_get_doit_qp()
2269 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { in stat_get_doit_qp()
2275 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { in stat_get_doit_qp()
2280 nlmsg_end(msg, nlh); in stat_get_doit_qp()
2282 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_qp()
2285 nlmsg_free(msg); in stat_get_doit_qp()
2355 struct sk_buff *msg; in nldev_stat_get_counter_status_doit() local
2382 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_get_counter_status_doit()
2383 if (!msg) { in nldev_stat_get_counter_status_doit()
2389 msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_get_counter_status_doit()
2394 if (fill_nldev_handle(msg, device) || in nldev_stat_get_counter_status_doit()
2395 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in nldev_stat_get_counter_status_doit()
2398 table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in nldev_stat_get_counter_status_doit()
2404 entry = nla_nest_start(msg, in nldev_stat_get_counter_status_doit()
2409 if (nla_put_string(msg, in nldev_stat_get_counter_status_doit()
2412 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) in nldev_stat_get_counter_status_doit()
2416 (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, in nldev_stat_get_counter_status_doit()
2420 nla_nest_end(msg, entry); in nldev_stat_get_counter_status_doit()
2424 nla_nest_end(msg, table); in nldev_stat_get_counter_status_doit()
2425 nlmsg_end(msg, nlh); in nldev_stat_get_counter_status_doit()
2427 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_get_counter_status_doit()
2430 nla_nest_cancel(msg, entry); in nldev_stat_get_counter_status_doit()
2433 nla_nest_cancel(msg, table); in nldev_stat_get_counter_status_doit()
2435 nlmsg_free(msg); in nldev_stat_get_counter_status_doit()
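Finally, nldev_stat_get_counter_status_doit() (lines 2398-2435) nests two levels deep: a HWCOUNTERS table holding one entry per counter, with the error path cancelling the innermost nest first. A sketch of the loop, assuming struct rdma_stat_desc from <rdma/ib_verbs.h> and omitting the DYNAMIC flag attribute at line 2416 (the helper name is illustrative):

#include <net/netlink.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_netlink.h>

static int fill_counter_status(struct sk_buff *msg,
                               const struct rdma_stat_desc *descs, int num)
{
        struct nlattr *table, *entry;
        int i;

        table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
        if (!table)
                return -EMSGSIZE;

        for (i = 0; i < num; i++) {
                entry = nla_nest_start(msg,
                                       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
                if (!entry)
                        goto err_table;
                if (nla_put_string(msg,
                                   RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
                                   descs[i].name) ||
                    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
                        goto err_entry;
                nla_nest_end(msg, entry);
        }

        nla_nest_end(msg, table);
        return 0;

err_entry:
        nla_nest_cancel(msg, entry);    /* innermost nest first */
err_table:
        nla_nest_cancel(msg, table);
        return -EMSGSIZE;
}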