/Linux-v5.4/drivers/infiniband/hw/mlx4/ |
D | cm.c |
  75    static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)  in set_local_comm_id() argument
  77    if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in set_local_comm_id()
  79    (struct cm_sidr_generic_msg *)mad;  in set_local_comm_id()
  81    } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in set_local_comm_id()
  85    struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;  in set_local_comm_id()
  90    static u32 get_local_comm_id(struct ib_mad *mad)  in get_local_comm_id() argument
  92    if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {  in get_local_comm_id()
  94    (struct cm_sidr_generic_msg *)mad;  in get_local_comm_id()
  96    } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {  in get_local_comm_id()
  100   struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;  in get_local_comm_id()
  [all …]
|
D | mcg.c |
  209   static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)  in send_mad_to_wire() argument
  225   &ah_attr, NULL, 0xffff, mad);  in send_mad_to_wire()
  229   struct ib_mad *mad)  in send_mad_to_slave() argument
  249   return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);  in send_mad_to_slave()
  254   struct ib_sa_mad mad;  in send_join_to_wire() local
  255   struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;  in send_join_to_wire()
  259   memcpy(&mad, sa_mad, sizeof mad);  in send_join_to_wire()
  265   mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);  in send_join_to_wire()
  266   group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */  in send_join_to_wire()
  268   ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);  in send_join_to_wire()
  [all …]
|
D | mad.c |
  82    struct ib_mad mad;  member
  88    struct ib_mad mad;  member
  220   static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,  in smp_snoop() argument
  231   if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in smp_snoop()
  232   mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in smp_snoop()
  233   mad->mad_hdr.method == IB_MGMT_METHOD_SET)  in smp_snoop()
  234   switch (mad->mad_hdr.attr_id) {  in smp_snoop()
  238   pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;  in smp_snoop()
  264   bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;  in smp_snoop()
  265   base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);  in smp_snoop()
  [all …]
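The smp_snoop() hits above (and the mthca_mad.c entry further down) all gate on the same test before touching the payload: the MAD must belong to one of the two subnet-management classes and carry a SET method, and only then is the attribute ID examined. A hedged sketch of that filter follows; is_sm_portinfo_set() is an illustrative helper name, not a function from either driver.

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

/* Illustrative only: the class/method/attribute check smp_snoop() applies
 * before acting on a snooped PortInfo SET.  attr_id is stored big-endian,
 * so it is compared against the be16 constant directly. */
static bool is_sm_portinfo_set(const struct ib_mad *mad)
{
	return (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
		mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	       mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	       mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO;
}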
|
D | Makefile | 4 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
|
D | mlx4_ib.h |
  832   struct ib_sa_mad *mad);
  844   struct ib_grh *grh, struct ib_mad *mad);
  849   u16 vlan_id, struct ib_mad *mad);
  854   struct ib_mad *mad);
  857   struct ib_mad *mad);
|
/Linux-v5.4/Documentation/infiniband/ |
D | user_mad.rst |
  48    buffer needed is set in mad.length.
  52    struct ib_user_mad *mad;
  53    mad = malloc(sizeof *mad + 256);
  54    ret = read(fd, mad, sizeof *mad + 256);
  55    if (ret != sizeof mad + 256) {
  57    free(mad);
  62    struct ib_user_mad *mad;
  63    mad = malloc(sizeof *mad + 256);
  64    ret = read(fd, mad, sizeof *mad + 256);
  66    length = mad.length;
  [all …]
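The user_mad.rst lines above describe the userspace read protocol for /dev/infiniband/umadN: read at least sizeof(struct ib_user_mad) + 256 bytes, and if an RMPP MAD does not fit, the call fails with ENOSPC and the header's length field tells you how big a buffer to retry with. The sketch below is a cleaned-up version of that documented pattern, not the text of the kernel document itself; it assumes fd is an already-open umad device with an agent registered, and uses the mad->hdr.length spelling from <rdma/ib_user_mad.h>.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <rdma/ib_user_mad.h>

/* Read one MAD from an open umad fd, retrying with a larger buffer if the
 * kernel reports (via ENOSPC plus hdr.length) that the payload did not fit. */
static struct ib_user_mad *read_one_mad(int fd)
{
	size_t len = 256;		/* initial data size, as in the docs */
	struct ib_user_mad *mad = malloc(sizeof(*mad) + len);
	ssize_t ret;

	if (!mad)
		return NULL;

	ret = read(fd, mad, sizeof(*mad) + len);
	if (ret == -1 && errno == ENOSPC) {
		/* Buffer too small: the returned header reports the size needed. */
		struct ib_user_mad *bigger;

		len = mad->hdr.length;
		bigger = realloc(mad, sizeof(*mad) + len);
		if (!bigger)
			goto fail;
		mad = bigger;
		ret = read(fd, mad, sizeof(*mad) + len);
	}
	if (ret == -1) {
		perror("read");
		goto fail;
	}
	return mad;

fail:
	free(mad);
	return NULL;
}

Production code typically goes through libibumad's umad_recv() rather than raw read(2); the buffer-size negotiation underneath is the same.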
|
/Linux-v5.4/drivers/infiniband/core/ |
D | user_mad.c |
  131   struct ib_user_mad mad;  member
  186   for (packet->mad.hdr.id = 0;  in queue_packet()
  187   packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;  in queue_packet()
  188   packet->mad.hdr.id++)  in queue_packet()
  189   if (agent == __get_agent(file, packet->mad.hdr.id)) {  in queue_packet()
  221   packet->mad.hdr.status = ETIMEDOUT;  in send_handler()
  245   packet->mad.hdr.status = 0;  in recv_handler()
  246   packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;  in recv_handler()
  247   packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);  in recv_handler()
  253   packet->mad.hdr.lid = ib_lid_be16(0xFFFF &  in recv_handler()
  [all …]
|
D | sa_query.c |
  753   struct ib_sa_mad *mad = query->mad_buf->mad;  in ib_nl_set_path_rec_attrs() local
  754   ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;  in ib_nl_set_path_rec_attrs()
  837   struct ib_sa_mad *mad;  in ib_nl_send_msg() local
  840   mad = query->mad_buf->mad;  in ib_nl_send_msg()
  841   len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);  in ib_nl_send_msg()
  927   struct ib_sa_mad *mad = NULL;  in ib_nl_process_good_resolve_rsp() local
  957   mad = query->mad_buf->mad;  in ib_nl_process_good_resolve_rsp()
  958   mad->mad_hdr.method |=  in ib_nl_process_good_resolve_rsp()
  960   memcpy(mad->data, rec->path_rec,  in ib_nl_process_good_resolve_rsp()
  967   query->callback(query, status, mad);  in ib_nl_process_good_resolve_rsp()
  [all …]
|
D | mad_rmpp.c |
  119   struct ib_rmpp_mad *ack = msg->mad;  in format_ack()
  141   hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);  in ack_recv()
  149   format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);  in ack_recv()
  168   hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);  in alloc_response_msg()
  194   rmpp_mad = msg->mad;  in ack_ds_ack()
  195   memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);  in ack_ds_ack()
  228   rmpp_mad = msg->mad;  in nack_recv()
  229   memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);  in nack_recv()
  317   mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;  in create_rmpp_recv()
  336   struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;  in find_rmpp_recv()
  [all …]
|
D | mad.c |
  102   const struct ib_mad_hdr *mad);
  104   struct ib_mad_private *mad);
  812   struct ib_smp *smp = mad_send_wr->send_buf.mad;  in handle_outgoing_dr_smp()
  917   (struct ib_mad_hdr *)mad_priv->mad,  in handle_outgoing_dr_smp()
  922   if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&  in handle_outgoing_dr_smp()
  942   memcpy(mad_priv->mad, smp, mad_priv->mad_size);  in handle_outgoing_dr_smp()
  944   (const struct ib_mad_hdr *)mad_priv->mad);  in handle_outgoing_dr_smp()
  1008  struct ib_rmpp_mad *rmpp_mad = send_buf->mad;  in alloc_send_rmpp_list()
  1090  mad_send_wr->send_buf.mad = buf;  in ib_create_send_mad()
  1190  return mad_send_wr->send_buf.mad +  in ib_get_payload()
  [all …]
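These core/mad.c hits are the generic MAD layer the rest of this listing builds on: ib_create_send_mad() hands back an ib_mad_send_buf whose .mad pointer is what the cm.c and sa_query.c entries cast to their message structures. The sketch below shows that allocation path in isolation; the agent, address handle, queue pair number, pkey index, and the PerfMgt class/TID values are assumed purely for illustration, so treat it as a minimal outline rather than a complete sender.

#include <linux/err.h>
#include <rdma/ib_mad.h>

/* Minimal outline: allocate a send buffer from a registered MAD agent,
 * fill in the common header, and post it.  agent/ah/remote_qpn/pkey_index
 * are assumed to be set up elsewhere by the caller. */
static int example_send_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
			    u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad *mad;
	int ret;

	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index,
				      0 /* no RMPP */, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_KERNEL,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return PTR_ERR(send_buf);

	mad = send_buf->mad;			/* same .mad field the casts above use */
	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;	/* example class */
	mad->mad_hdr.class_version = 1;
	mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	mad->mad_hdr.tid           = cpu_to_be64(0x1234);	/* caller-chosen TID */

	send_buf->ah = ah;
	send_buf->timeout_ms = 100;
	send_buf->retries = 3;

	ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
	return ret;
}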
|
D | cm.c |
  1466  req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;  in ib_send_cm_req()
  1507  rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;  in cm_issue_rej()
  1508  rej_msg = (struct cm_rej_msg *) msg->mad;  in cm_issue_rej()
  1688  req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;  in cm_format_req_event()
  1821  cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,  in cm_dup_req_handler()
  1827  cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,  in cm_dup_req_handler()
  1852  req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;  in cm_match_req()
  1944  req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;  in cm_req_handler()
  2121  rep_msg = (struct cm_rep_msg *) msg->mad;  in ib_send_cm_rep()
  2189  cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,  in ib_send_cm_rtu()
  [all …]
|
D | Makefile | 13 multicast.o mad.o smi.o agent.o mad_rmpp.o \
|
D | mad_priv.h | 82 u8 mad[0]; member
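The mad_priv.h hit is the trailing flexible-array idiom: the private receive structure ends in u8 mad[0] (the older spelling of u8 mad[]) so the bookkeeping fields and the variable-size MAD buffer come from a single allocation. Below is a hedged sketch of the idiom with illustrative names, not the real ib_mad_private layout.

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: header plus variable-size payload in one allocation,
 * the same shape as the `u8 mad[0]` member in mad_priv.h. */
struct example_mad_private {
	size_t mad_size;
	u8 mad[];		/* payload starts right after the header */
};

static struct example_mad_private *alloc_mad_private(size_t mad_size)
{
	struct example_mad_private *p;

	p = kzalloc(sizeof(*p) + mad_size, GFP_KERNEL);
	if (p)
		p->mad_size = mad_size;
	return p;
}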
|
/Linux-v5.4/drivers/infiniband/hw/mthca/ |
D | mthca_mad.c |
  109   const struct ib_mad *mad,  in smp_snoop() argument
  114   if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in smp_snoop()
  115   mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in smp_snoop()
  116   mad->mad_hdr.method == IB_MGMT_METHOD_SET) {  in smp_snoop()
  117   if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {  in smp_snoop()
  119   (struct ib_port_info *) ((struct ib_smp *) mad)->data;  in smp_snoop()
  141   if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {  in smp_snoop()
  151   struct ib_mad *mad)  in node_desc_override() argument
  153   if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||  in node_desc_override()
  154   mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&  in node_desc_override()
  [all …]
|
/Linux-v5.4/drivers/scsi/ibmvscsi/ |
D | ibmvfc.c |
  149   struct ibmvfc_mad_common *mad = &evt->iu.mad_common;  in ibmvfc_trc_start() local
  167   entry->op_code = be32_to_cpu(mad->opcode);  in ibmvfc_trc_start()
  183   struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;  in ibmvfc_trc_end() local
  204   entry->op_code = be32_to_cpu(mad->opcode);  in ibmvfc_trc_end()
  205   entry->u.end.status = be16_to_cpu(mad->status);  in ibmvfc_trc_end()
  1813  struct ibmvfc_passthru_mad *mad;  in ibmvfc_bsg_request() local
  1892  mad = &evt->iu.passthru;  in ibmvfc_bsg_request()
  1894  memset(mad, 0, sizeof(*mad));  in ibmvfc_bsg_request()
  1895  mad->common.version = cpu_to_be32(1);  in ibmvfc_bsg_request()
  1896  mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);  in ibmvfc_bsg_request()
  [all …]
|
/Linux-v5.4/include/trace/events/ |
D | ib_mad.h |
  68    ((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
  70    ((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
  72    ((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
  74    ((struct ib_mad_hdr *)wr->send_buf.mad)->method;
  76    ((struct ib_mad_hdr *)wr->send_buf.mad)->status;
  78    ((struct ib_mad_hdr *)wr->send_buf.mad)->class_specific;
  79    __entry->tid = ((struct ib_mad_hdr *)wr->send_buf.mad)->tid;
  81    ((struct ib_mad_hdr *)wr->send_buf.mad)->attr_id;
  83    ((struct ib_mad_hdr *)wr->send_buf.mad)->attr_mod;
  155   ((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
  [all …]
|
/Linux-v5.4/drivers/scsi/ibmvscsi_tgt/ |
D | ibmvscsi_tgt.c |
  1503  struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;  in ibmvscsis_adapter_info() local
  1509  mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);  in ibmvscsis_adapter_info()
  1511  if (be16_to_cpu(mad->common.length) > sizeof(*info)) {  in ibmvscsis_adapter_info()
  1512  mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);  in ibmvscsis_adapter_info()
  1521  mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);  in ibmvscsis_adapter_info()
  1526  rc = h_copy_rdma(be16_to_cpu(mad->common.length),  in ibmvscsis_adapter_info()
  1528  be64_to_cpu(mad->buffer),  in ibmvscsis_adapter_info()
  1539  be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);  in ibmvscsis_adapter_info()
  1574  be64_to_cpu(mad->buffer));  in ibmvscsis_adapter_info()
  1614  struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;  in ibmvscsis_cap_mad() local
  [all …]
|
/Linux-v5.4/drivers/infiniband/ulp/opa_vnic/ |
D | opa_vnic_vema.c |
  632   if (!mad_wc || !mad_wc->recv_buf.mad)  in vema_recv()
  649   vema_mad = rsp->mad;  in vema_recv()
  650   memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);  in vema_recv()
  657   switch (mad_wc->recv_buf.mad->mad_hdr.method) {  in vema_recv()
  659   vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,  in vema_recv()
  663   vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,  in vema_recv()
  809   trap_mad = send_buf->mad;  in opa_vnic_vema_send_trap()
|
/Linux-v5.4/drivers/infiniband/sw/rdmavt/ |
D | Makefile | 11 rdmavt-y := vt.o ah.o cq.o mad.o mcast.o mmap.o mr.o pd.o qp.o \
|
/Linux-v5.4/drivers/infiniband/hw/mlx5/ |
D | Makefile | 5 srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
|
/Linux-v5.4/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c |
  272   static void srpt_get_class_port_info(struct ib_dm_mad *mad)  in srpt_get_class_port_info() argument
  276   cif = (struct ib_class_port_info *)mad->data;  in srpt_get_class_port_info()
  282   mad->mad_hdr.status = 0;  in srpt_get_class_port_info()
  292   static void srpt_get_iou(struct ib_dm_mad *mad)  in srpt_get_iou() argument
  298   ioui = (struct ib_dm_iou_info *)mad->data;  in srpt_get_iou()
  307   mad->mad_hdr.status = 0;  in srpt_get_iou()
  321   struct ib_dm_mad *mad)  in srpt_get_ioc() argument
  327   iocp = (struct ib_dm_ioc_profile *)mad->data;  in srpt_get_ioc()
  330   mad->mad_hdr.status  in srpt_get_ioc()
  336   mad->mad_hdr.status  in srpt_get_ioc()
  [all …]
|
/Linux-v5.4/net/802/ |
D | mrp.c |
  239   struct rb_node *parent = app->mad.rb_node;  in mrp_attr_lookup()
  259   struct rb_node *parent = NULL, **p = &app->mad.rb_node;  in mrp_attr_create()
  285   rb_insert_color(&attr->node, &app->mad);  in mrp_attr_create()
  291   rb_erase(&attr->node, &app->mad);  in mrp_attr_destroy()
  570   for (node = rb_first(&app->mad);  in mrp_mad_event()
  861   app->mad = RB_ROOT;  in mrp_init_applicant()
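The mrp.c hits are a different "mad" entirely: in 802.1 MRP, app->mad is the rb_root of the applicant's attribute declaration table, which mrp_attr_lookup() and mrp_attr_create() walk and populate in the usual kernel rbtree style. Below is a hedged sketch of that walk-and-link pattern with stand-in names and a caller-supplied comparator, not the mrp.c structures themselves.

#include <linux/rbtree.h>
#include <linux/types.h>

/* Stand-in structure and comparator: the point is the parent/link walk,
 * rb_link_node() and rb_insert_color(), as used around mrp_attr_create(). */
struct example_attr {
	struct rb_node node;
	u8 type;
	u8 len;
	u8 value[16];
};

static struct example_attr *example_attr_insert(struct rb_root *root,
						struct example_attr *attr,
						int (*cmp)(const struct example_attr *,
							   const struct example_attr *))
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct example_attr *cur = rb_entry(*p, struct example_attr, node);
		int d = cmp(attr, cur);

		parent = *p;
		if (d < 0)
			p = &(*p)->rb_left;
		else if (d > 0)
			p = &(*p)->rb_right;
		else
			return cur;		/* attribute already declared */
	}
	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, root);
	return attr;
}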
|
/Linux-v5.4/drivers/infiniband/hw/hfi1/ |
D | Makefile | 25 mad.o \
|
/Linux-v5.4/include/scsi/ |
D | viosrp.h | 197 union mad_iu mad; member
|
/Linux-v5.4/include/net/ |
D | mrp.h | 121 struct rb_root mad; member
|