1 /*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <linux/socket.h>
52 #include <linux/irq_poll.h>
53 #include <uapi/linux/if_ether.h>
54 #include <net/ipv6.h>
55 #include <net/ip.h>
56 #include <linux/string.h>
57 #include <linux/slab.h>
58 #include <linux/netdevice.h>
59
60 #include <linux/if_link.h>
61 #include <linux/atomic.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/uaccess.h>
64 #include <linux/cgroup_rdma.h>
65 #include <uapi/rdma/ib_user_verbs.h>
66 #include <rdma/restrack.h>
67 #include <uapi/rdma/rdma_user_ioctl.h>
68 #include <uapi/rdma/ib_user_ioctl_verbs.h>
69
70 #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
71
72 extern struct workqueue_struct *ib_wq;
73 extern struct workqueue_struct *ib_comp_wq;
74
75 union ib_gid {
76 u8 raw[16];
77 struct {
78 __be64 subnet_prefix;
79 __be64 interface_id;
80 } global;
81 };
82
83 extern union ib_gid zgid;
84
85 enum ib_gid_type {
86 /* If link layer is Ethernet, this is RoCE V1 */
87 IB_GID_TYPE_IB = 0,
88 IB_GID_TYPE_ROCE = 0,
89 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
90 IB_GID_TYPE_SIZE
91 };
92
93 #define ROCE_V2_UDP_DPORT 4791
94 struct ib_gid_attr {
95 struct net_device *ndev;
96 struct ib_device *device;
97 union ib_gid gid;
98 enum ib_gid_type gid_type;
99 u16 index;
100 u8 port_num;
101 };
102
103 enum rdma_node_type {
104 /* IB values map to NodeInfo:NodeType. */
105 RDMA_NODE_IB_CA = 1,
106 RDMA_NODE_IB_SWITCH,
107 RDMA_NODE_IB_ROUTER,
108 RDMA_NODE_RNIC,
109 RDMA_NODE_USNIC,
110 RDMA_NODE_USNIC_UDP,
111 };
112
113 enum {
114 /* set the local administered indication */
115 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
116 };
117
118 enum rdma_transport_type {
119 RDMA_TRANSPORT_IB,
120 RDMA_TRANSPORT_IWARP,
121 RDMA_TRANSPORT_USNIC,
122 RDMA_TRANSPORT_USNIC_UDP
123 };
124
125 enum rdma_protocol_type {
126 RDMA_PROTOCOL_IB,
127 RDMA_PROTOCOL_IBOE,
128 RDMA_PROTOCOL_IWARP,
129 RDMA_PROTOCOL_USNIC_UDP
130 };
131
132 __attribute_const__ enum rdma_transport_type
133 rdma_node_get_transport(enum rdma_node_type node_type);
134
135 enum rdma_network_type {
136 RDMA_NETWORK_IB,
137 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
138 RDMA_NETWORK_IPV4,
139 RDMA_NETWORK_IPV6
140 };
141
142 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
143 {
144 if (network_type == RDMA_NETWORK_IPV4 ||
145 network_type == RDMA_NETWORK_IPV6)
146 return IB_GID_TYPE_ROCE_UDP_ENCAP;
147
148 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
149 return IB_GID_TYPE_IB;
150 }
151
152 static inline enum rdma_network_type
153 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
154 {
155 if (attr->gid_type == IB_GID_TYPE_IB)
156 return RDMA_NETWORK_IB;
157
158 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
159 return RDMA_NETWORK_IPV4;
160 else
161 return RDMA_NETWORK_IPV6;
162 }
163
164 enum rdma_link_layer {
165 IB_LINK_LAYER_UNSPECIFIED,
166 IB_LINK_LAYER_INFINIBAND,
167 IB_LINK_LAYER_ETHERNET,
168 };
169
170 enum ib_device_cap_flags {
171 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
172 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
173 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
174 IB_DEVICE_RAW_MULTI = (1 << 3),
175 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
176 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
177 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
178 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
179 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
180 /* Not in use, former INIT_TYPE = (1 << 9),*/
181 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
182 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
183 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
184 IB_DEVICE_SRQ_RESIZE = (1 << 13),
185 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
186
187 /*
188 * This device supports a per-device lkey or stag that can be
189 * used without performing a memory registration for the local
190 * memory. Note that ULPs should never check this flag, but
191 * instead use the local_dma_lkey flag in the ib_pd structure,
192 * which will always contain a usable lkey.
193 */
194 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
195 /* Reserved, old SEND_W_INV = (1 << 16),*/
196 IB_DEVICE_MEM_WINDOW = (1 << 17),
197 /*
198 * Devices should set IB_DEVICE_UD_IP_SUM if they support
199 * insertion of UDP and TCP checksum on outgoing UD IPoIB
200 * messages and can verify the validity of checksum for
201 * incoming messages. Setting this flag implies that the
202 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
203 */
204 IB_DEVICE_UD_IP_CSUM = (1 << 18),
205 IB_DEVICE_UD_TSO = (1 << 19),
206 IB_DEVICE_XRC = (1 << 20),
207
208 /*
209 * This device supports the IB "base memory management extension",
210 * which includes support for fast registrations (IB_WR_REG_MR,
211 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
212 * also be set by any iWarp device which must support FRs to comply
213 * with the iWarp verbs spec. iWarp devices also support the
214 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
215 * stag.
216 */
217 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
218 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
219 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
220 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
221 IB_DEVICE_RC_IP_CSUM = (1 << 25),
222 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
223 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
224 /*
225 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
226 * support execution of WQEs that involve synchronization
227 * of I/O operations with a single completion queue managed
228 * by hardware.
229 */
230 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
231 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
232 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
233 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
234 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
235 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
236 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
237 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
238 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
239 /* The device supports padding incoming writes to cacheline. */
240 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
241 };
242
243 enum ib_signature_prot_cap {
244 IB_PROT_T10DIF_TYPE_1 = 1,
245 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
246 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
247 };
248
249 enum ib_signature_guard_cap {
250 IB_GUARD_T10DIF_CRC = 1,
251 IB_GUARD_T10DIF_CSUM = 1 << 1,
252 };
253
254 enum ib_atomic_cap {
255 IB_ATOMIC_NONE,
256 IB_ATOMIC_HCA,
257 IB_ATOMIC_GLOB
258 };
259
260 enum ib_odp_general_cap_bits {
261 IB_ODP_SUPPORT = 1 << 0,
262 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
263 };
264
265 enum ib_odp_transport_cap_bits {
266 IB_ODP_SUPPORT_SEND = 1 << 0,
267 IB_ODP_SUPPORT_RECV = 1 << 1,
268 IB_ODP_SUPPORT_WRITE = 1 << 2,
269 IB_ODP_SUPPORT_READ = 1 << 3,
270 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
271 };
272
273 struct ib_odp_caps {
274 uint64_t general_caps;
275 struct {
276 uint32_t rc_odp_caps;
277 uint32_t uc_odp_caps;
278 uint32_t ud_odp_caps;
279 } per_transport_caps;
280 };
281
282 struct ib_rss_caps {
283 /* Corresponding bit will be set if qp type from
284 * 'enum ib_qp_type' is supported, e.g.
285 * supported_qpts |= 1 << IB_QPT_UD
286 */
287 u32 supported_qpts;
288 u32 max_rwq_indirection_tables;
289 u32 max_rwq_indirection_table_size;
290 };
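/*
 * Illustrative sketch, not part of the original header: per the comment
 * above, supported_qpts is a bitmap keyed by enum ib_qp_type, so a
 * hypothetical consumer holding a filled struct ib_device_attr could test
 * for RSS support on UD QPs like this:
 *
 *	if ((attr->rss_caps.supported_qpts & (1 << IB_QPT_UD)) &&
 *	    attr->rss_caps.max_rwq_indirection_tables > 0) {
 *		RSS with UD work queues is available on this device.
 *	}
 */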
291
292 enum ib_tm_cap_flags {
293 /* Support tag matching on RC transport */
294 IB_TM_CAP_RC = 1 << 0,
295 };
296
297 struct ib_tm_caps {
298 /* Max size of RNDV header */
299 u32 max_rndv_hdr_size;
300 /* Max number of entries in tag matching list */
301 u32 max_num_tags;
302 /* From enum ib_tm_cap_flags */
303 u32 flags;
304 /* Max number of outstanding list operations */
305 u32 max_ops;
306 /* Max number of SGE in tag matching entry */
307 u32 max_sge;
308 };
309
310 struct ib_cq_init_attr {
311 unsigned int cqe;
312 int comp_vector;
313 u32 flags;
314 };
315
316 enum ib_cq_attr_mask {
317 IB_CQ_MODERATE = 1 << 0,
318 };
319
320 struct ib_cq_caps {
321 u16 max_cq_moderation_count;
322 u16 max_cq_moderation_period;
323 };
324
325 struct ib_dm_mr_attr {
326 u64 length;
327 u64 offset;
328 u32 access_flags;
329 };
330
331 struct ib_dm_alloc_attr {
332 u64 length;
333 u32 alignment;
334 u32 flags;
335 };
336
337 struct ib_device_attr {
338 u64 fw_ver;
339 __be64 sys_image_guid;
340 u64 max_mr_size;
341 u64 page_size_cap;
342 u32 vendor_id;
343 u32 vendor_part_id;
344 u32 hw_ver;
345 int max_qp;
346 int max_qp_wr;
347 u64 device_cap_flags;
348 int max_send_sge;
349 int max_recv_sge;
350 int max_sge_rd;
351 int max_cq;
352 int max_cqe;
353 int max_mr;
354 int max_pd;
355 int max_qp_rd_atom;
356 int max_ee_rd_atom;
357 int max_res_rd_atom;
358 int max_qp_init_rd_atom;
359 int max_ee_init_rd_atom;
360 enum ib_atomic_cap atomic_cap;
361 enum ib_atomic_cap masked_atomic_cap;
362 int max_ee;
363 int max_rdd;
364 int max_mw;
365 int max_raw_ipv6_qp;
366 int max_raw_ethy_qp;
367 int max_mcast_grp;
368 int max_mcast_qp_attach;
369 int max_total_mcast_qp_attach;
370 int max_ah;
371 int max_fmr;
372 int max_map_per_fmr;
373 int max_srq;
374 int max_srq_wr;
375 int max_srq_sge;
376 unsigned int max_fast_reg_page_list_len;
377 u16 max_pkeys;
378 u8 local_ca_ack_delay;
379 int sig_prot_cap;
380 int sig_guard_cap;
381 struct ib_odp_caps odp_caps;
382 uint64_t timestamp_mask;
383 uint64_t hca_core_clock; /* in KHZ */
384 struct ib_rss_caps rss_caps;
385 u32 max_wq_type_rq;
386 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
387 struct ib_tm_caps tm_caps;
388 struct ib_cq_caps cq_caps;
389 u64 max_dm_size;
390 };
391
392 enum ib_mtu {
393 IB_MTU_256 = 1,
394 IB_MTU_512 = 2,
395 IB_MTU_1024 = 3,
396 IB_MTU_2048 = 4,
397 IB_MTU_4096 = 5
398 };
399
400 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
401 {
402 switch (mtu) {
403 case IB_MTU_256: return 256;
404 case IB_MTU_512: return 512;
405 case IB_MTU_1024: return 1024;
406 case IB_MTU_2048: return 2048;
407 case IB_MTU_4096: return 4096;
408 default: return -1;
409 }
410 }
411
412 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
413 {
414 if (mtu >= 4096)
415 return IB_MTU_4096;
416 else if (mtu >= 2048)
417 return IB_MTU_2048;
418 else if (mtu >= 1024)
419 return IB_MTU_1024;
420 else if (mtu >= 512)
421 return IB_MTU_512;
422 else
423 return IB_MTU_256;
424 }
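/*
 * Worked example, not part of the original header: ib_mtu_int_to_enum()
 * rounds down to the nearest IB MTU, so an Ethernet-style MTU of 1500
 * maps to IB_MTU_1024, and converting back gives 1024:
 *
 *	enum ib_mtu mtu = ib_mtu_int_to_enum(1500);	(IB_MTU_1024)
 *	int bytes = ib_mtu_enum_to_int(mtu);		(1024)
 */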
425
426 enum ib_port_state {
427 IB_PORT_NOP = 0,
428 IB_PORT_DOWN = 1,
429 IB_PORT_INIT = 2,
430 IB_PORT_ARMED = 3,
431 IB_PORT_ACTIVE = 4,
432 IB_PORT_ACTIVE_DEFER = 5
433 };
434
435 enum ib_port_width {
436 IB_WIDTH_1X = 1,
437 IB_WIDTH_4X = 2,
438 IB_WIDTH_8X = 4,
439 IB_WIDTH_12X = 8
440 };
441
442 static inline int ib_width_enum_to_int(enum ib_port_width width)
443 {
444 switch (width) {
445 case IB_WIDTH_1X: return 1;
446 case IB_WIDTH_4X: return 4;
447 case IB_WIDTH_8X: return 8;
448 case IB_WIDTH_12X: return 12;
449 default: return -1;
450 }
451 }
452
453 enum ib_port_speed {
454 IB_SPEED_SDR = 1,
455 IB_SPEED_DDR = 2,
456 IB_SPEED_QDR = 4,
457 IB_SPEED_FDR10 = 8,
458 IB_SPEED_FDR = 16,
459 IB_SPEED_EDR = 32,
460 IB_SPEED_HDR = 64
461 };
462
463 /**
464 * struct rdma_hw_stats
465 * @lock - Mutex to protect parallel write access to lifespan and values
466 * of counters, which are 64 bits and not guaranteed to be written
467 * atomically on 32-bit systems.
468 * @timestamp - Used by the core code to track when the last update was
469 * @lifespan - Used by the core code to determine how old the counters
470 * should be before being updated again. Stored in jiffies, defaults
471 * to 10 milliseconds; drivers can override the default by specifying
472 * their own value during their allocation routine.
473 * @names - Array of pointers to static names used for the counters in the
474 * sysfs directory.
475 * @num_counters - How many hardware counters there are. If names is
476 * shorter than this number, a kernel oops will result. Driver authors
477 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
478 * in their code to prevent this.
479 * @value - Array of u64 counters that are accessed by the sysfs code and
480 * filled in by the driver's get_hw_stats routine
481 */
482 struct rdma_hw_stats {
483 struct mutex lock; /* Protect lifespan and values[] */
484 unsigned long timestamp;
485 unsigned long lifespan;
486 const char * const *names;
487 int num_counters;
488 u64 value[];
489 };
490
491 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
492 /**
493 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
494 * for drivers.
495 * @names - Array of static const char *
496 * @num_counters - How many elements in array
497 * @lifespan - How many milliseconds between updates
498 */
499 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
500 const char * const *names, int num_counters,
501 unsigned long lifespan)
502 {
503 struct rdma_hw_stats *stats;
504
505 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
506 GFP_KERNEL);
507 if (!stats)
508 return NULL;
509 stats->names = names;
510 stats->num_counters = num_counters;
511 stats->lifespan = msecs_to_jiffies(lifespan);
512
513 return stats;
514 }
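/*
 * Hedged example, not part of the original header: a driver's alloc_hw_stats
 * callback would typically pair a static names array with the BUILD_BUG_ON
 * recommended in the rdma_hw_stats kernel-doc above. The names, count and
 * function below are hypothetical.
 *
 *	enum { FOO_NR_COUNTERS = 2 };
 *
 *	static const char * const foo_counter_names[] = {
 *		"rx_packets",
 *		"tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *ibdev,
 *							u8 port_num)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < FOO_NR_COUNTERS);
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *						  FOO_NR_COUNTERS,
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */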
515
516
517 /* Define bits for the various functionality this port needs the core to
518 * support.
519 */
520 /* Management 0x00000FFF */
521 #define RDMA_CORE_CAP_IB_MAD 0x00000001
522 #define RDMA_CORE_CAP_IB_SMI 0x00000002
523 #define RDMA_CORE_CAP_IB_CM 0x00000004
524 #define RDMA_CORE_CAP_IW_CM 0x00000008
525 #define RDMA_CORE_CAP_IB_SA 0x00000010
526 #define RDMA_CORE_CAP_OPA_MAD 0x00000020
527
528 /* Address format 0x000FF000 */
529 #define RDMA_CORE_CAP_AF_IB 0x00001000
530 #define RDMA_CORE_CAP_ETH_AH 0x00002000
531 #define RDMA_CORE_CAP_OPA_AH 0x00004000
532 #define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
533
534 /* Protocol 0xFFF00000 */
535 #define RDMA_CORE_CAP_PROT_IB 0x00100000
536 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000
537 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000
538 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
539 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
540 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000
541
542 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
543 | RDMA_CORE_CAP_PROT_ROCE \
544 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
545
546 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
547 | RDMA_CORE_CAP_IB_MAD \
548 | RDMA_CORE_CAP_IB_SMI \
549 | RDMA_CORE_CAP_IB_CM \
550 | RDMA_CORE_CAP_IB_SA \
551 | RDMA_CORE_CAP_AF_IB)
552 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
553 | RDMA_CORE_CAP_IB_MAD \
554 | RDMA_CORE_CAP_IB_CM \
555 | RDMA_CORE_CAP_AF_IB \
556 | RDMA_CORE_CAP_ETH_AH)
557 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
558 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
559 | RDMA_CORE_CAP_IB_MAD \
560 | RDMA_CORE_CAP_IB_CM \
561 | RDMA_CORE_CAP_AF_IB \
562 | RDMA_CORE_CAP_ETH_AH)
563 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
564 | RDMA_CORE_CAP_IW_CM)
565 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
566 | RDMA_CORE_CAP_OPA_MAD)
567
568 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
569
570 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
571
572 struct ib_port_attr {
573 u64 subnet_prefix;
574 enum ib_port_state state;
575 enum ib_mtu max_mtu;
576 enum ib_mtu active_mtu;
577 int gid_tbl_len;
578 unsigned int ip_gids:1;
579 /* This is the value from PortInfo CapabilityMask, defined by IBA */
580 u32 port_cap_flags;
581 u32 max_msg_sz;
582 u32 bad_pkey_cntr;
583 u32 qkey_viol_cntr;
584 u16 pkey_tbl_len;
585 u32 sm_lid;
586 u32 lid;
587 u8 lmc;
588 u8 max_vl_num;
589 u8 sm_sl;
590 u8 subnet_timeout;
591 u8 init_type_reply;
592 u8 active_width;
593 u8 active_speed;
594 u8 phys_state;
595 };
596
597 enum ib_device_modify_flags {
598 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
599 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
600 };
601
602 #define IB_DEVICE_NODE_DESC_MAX 64
603
604 struct ib_device_modify {
605 u64 sys_image_guid;
606 char node_desc[IB_DEVICE_NODE_DESC_MAX];
607 };
608
609 enum ib_port_modify_flags {
610 IB_PORT_SHUTDOWN = 1,
611 IB_PORT_INIT_TYPE = (1<<2),
612 IB_PORT_RESET_QKEY_CNTR = (1<<3),
613 IB_PORT_OPA_MASK_CHG = (1<<4)
614 };
615
616 struct ib_port_modify {
617 u32 set_port_cap_mask;
618 u32 clr_port_cap_mask;
619 u8 init_type;
620 };
621
622 enum ib_event_type {
623 IB_EVENT_CQ_ERR,
624 IB_EVENT_QP_FATAL,
625 IB_EVENT_QP_REQ_ERR,
626 IB_EVENT_QP_ACCESS_ERR,
627 IB_EVENT_COMM_EST,
628 IB_EVENT_SQ_DRAINED,
629 IB_EVENT_PATH_MIG,
630 IB_EVENT_PATH_MIG_ERR,
631 IB_EVENT_DEVICE_FATAL,
632 IB_EVENT_PORT_ACTIVE,
633 IB_EVENT_PORT_ERR,
634 IB_EVENT_LID_CHANGE,
635 IB_EVENT_PKEY_CHANGE,
636 IB_EVENT_SM_CHANGE,
637 IB_EVENT_SRQ_ERR,
638 IB_EVENT_SRQ_LIMIT_REACHED,
639 IB_EVENT_QP_LAST_WQE_REACHED,
640 IB_EVENT_CLIENT_REREGISTER,
641 IB_EVENT_GID_CHANGE,
642 IB_EVENT_WQ_FATAL,
643 };
644
645 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
646
647 struct ib_event {
648 struct ib_device *device;
649 union {
650 struct ib_cq *cq;
651 struct ib_qp *qp;
652 struct ib_srq *srq;
653 struct ib_wq *wq;
654 u8 port_num;
655 } element;
656 enum ib_event_type event;
657 };
658
659 struct ib_event_handler {
660 struct ib_device *device;
661 void (*handler)(struct ib_event_handler *, struct ib_event *);
662 struct list_head list;
663 };
664
665 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
666 do { \
667 (_ptr)->device = _device; \
668 (_ptr)->handler = _handler; \
669 INIT_LIST_HEAD(&(_ptr)->list); \
670 } while (0)
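/*
 * Hedged usage sketch, not part of the original header: a ULP usually embeds
 * an ib_event_handler, initializes it with the macro above and registers it
 * with ib_register_event_handler() (declared later in this header). The
 * handler and the device pointer below are hypothetical.
 *
 *	static void foo_event_handler(struct ib_event_handler *handler,
 *				      struct ib_event *event)
 *	{
 *		pr_info("async event %d from %s\n",
 *			event->event, event->device->name);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&foo_handler, device, foo_event_handler);
 *	ib_register_event_handler(&foo_handler);
 */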
671
672 struct ib_global_route {
673 const struct ib_gid_attr *sgid_attr;
674 union ib_gid dgid;
675 u32 flow_label;
676 u8 sgid_index;
677 u8 hop_limit;
678 u8 traffic_class;
679 };
680
681 struct ib_grh {
682 __be32 version_tclass_flow;
683 __be16 paylen;
684 u8 next_hdr;
685 u8 hop_limit;
686 union ib_gid sgid;
687 union ib_gid dgid;
688 };
689
690 union rdma_network_hdr {
691 struct ib_grh ibgrh;
692 struct {
693 /* The IB spec states that if it's IPv4, the IPv4 header
694 * is located in the last 20 bytes of the GRH.
695 */
696 u8 reserved[20];
697 struct iphdr roce4grh;
698 };
699 };
700
701 #define IB_QPN_MASK 0xFFFFFF
702
703 enum {
704 IB_MULTICAST_QPN = 0xffffff
705 };
706
707 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
708 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
709
710 enum ib_ah_flags {
711 IB_AH_GRH = 1
712 };
713
714 enum ib_rate {
715 IB_RATE_PORT_CURRENT = 0,
716 IB_RATE_2_5_GBPS = 2,
717 IB_RATE_5_GBPS = 5,
718 IB_RATE_10_GBPS = 3,
719 IB_RATE_20_GBPS = 6,
720 IB_RATE_30_GBPS = 4,
721 IB_RATE_40_GBPS = 7,
722 IB_RATE_60_GBPS = 8,
723 IB_RATE_80_GBPS = 9,
724 IB_RATE_120_GBPS = 10,
725 IB_RATE_14_GBPS = 11,
726 IB_RATE_56_GBPS = 12,
727 IB_RATE_112_GBPS = 13,
728 IB_RATE_168_GBPS = 14,
729 IB_RATE_25_GBPS = 15,
730 IB_RATE_100_GBPS = 16,
731 IB_RATE_200_GBPS = 17,
732 IB_RATE_300_GBPS = 18
733 };
734
735 /**
736 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
737 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
738 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
739 * @rate: rate to convert.
740 */
741 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
742
743 /**
744 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
745 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
746 * @rate: rate to convert.
747 */
748 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
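/*
 * Worked example, not part of the original header: with the conversions
 * documented above,
 *
 *	ib_rate_to_mult(IB_RATE_40_GBPS)	returns 16 (16 * 2.5 Gbit/sec)
 *	ib_rate_to_mbps(IB_RATE_40_GBPS)	returns 40000
 */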
749
750
751 /**
752 * enum ib_mr_type - memory region type
753 * @IB_MR_TYPE_MEM_REG: memory region that is used for
754 * normal registration
755 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
756 * signature operations (data-integrity
757 * capable regions)
758 * @IB_MR_TYPE_SG_GAPS: memory region that is capable of
759 * registering any arbitrary sg list (without
760 * the normal mr constraints - see
761 * ib_map_mr_sg)
762 */
763 enum ib_mr_type {
764 IB_MR_TYPE_MEM_REG,
765 IB_MR_TYPE_SIGNATURE,
766 IB_MR_TYPE_SG_GAPS,
767 };
768
769 /**
770 * Signature types
771 * IB_SIG_TYPE_NONE: Unprotected.
772 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
773 */
774 enum ib_signature_type {
775 IB_SIG_TYPE_NONE,
776 IB_SIG_TYPE_T10_DIF,
777 };
778
779 /**
780 * Signature T10-DIF block-guard types
781 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
782 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
783 */
784 enum ib_t10_dif_bg_type {
785 IB_T10DIF_CRC,
786 IB_T10DIF_CSUM
787 };
788
789 /**
790 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
791 * domain.
792 * @bg_type: T10-DIF block guard type (CRC|CSUM)
793 * @pi_interval: protection information interval.
794 * @bg: seed of guard computation.
795 * @app_tag: application tag of guard block
796 * @ref_tag: initial guard block reference tag.
797 * @ref_remap: Indicate whether the reftag increments with each block
798 * @app_escape: Indicate to skip block check if apptag=0xffff
799 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
800 * @apptag_check_mask: check bitmask of application tag.
801 */
802 struct ib_t10_dif_domain {
803 enum ib_t10_dif_bg_type bg_type;
804 u16 pi_interval;
805 u16 bg;
806 u16 app_tag;
807 u32 ref_tag;
808 bool ref_remap;
809 bool app_escape;
810 bool ref_escape;
811 u16 apptag_check_mask;
812 };
813
814 /**
815 * struct ib_sig_domain - Parameters for signature domain
816 * @sig_type: specific signature type
817 * @sig: union of all signature domain attributes that may
818 * be used to set domain layout.
819 */
820 struct ib_sig_domain {
821 enum ib_signature_type sig_type;
822 union {
823 struct ib_t10_dif_domain dif;
824 } sig;
825 };
826
827 /**
828 * struct ib_sig_attrs - Parameters for signature handover operation
829 * @check_mask: bitmask for signature byte check (8 bytes)
830 * @mem: memory domain layout descriptor.
831 * @wire: wire domain layout descriptor.
832 */
833 struct ib_sig_attrs {
834 u8 check_mask;
835 struct ib_sig_domain mem;
836 struct ib_sig_domain wire;
837 };
838
839 enum ib_sig_err_type {
840 IB_SIG_BAD_GUARD,
841 IB_SIG_BAD_REFTAG,
842 IB_SIG_BAD_APPTAG,
843 };
844
845 /**
846 * Signature check masks (8 bytes in total) according to the T10-PI standard:
847 * -------- -------- ------------
848 * | GUARD | APPTAG | REFTAG |
849 * | 2B | 2B | 4B |
850 * -------- -------- ------------
851 */
852 enum {
853 IB_SIG_CHECK_GUARD = 0xc0,
854 IB_SIG_CHECK_APPTAG = 0x30,
855 IB_SIG_CHECK_REFTAG = 0x0f,
856 };
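/*
 * Hedged sketch, not part of the original header: following the mask layout
 * above, a consumer that wants the guard and reference tag verified but the
 * application tag ignored could fill ib_sig_attrs roughly like this (the
 * domain setup is abbreviated and illustrative only):
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG,
 *		.mem  = { .sig_type = IB_SIG_TYPE_NONE },
 *		.wire = { .sig_type = IB_SIG_TYPE_T10_DIF },
 *	};
 */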
857
858 /**
859 * struct ib_sig_err - signature error descriptor
860 */
861 struct ib_sig_err {
862 enum ib_sig_err_type err_type;
863 u32 expected;
864 u32 actual;
865 u64 sig_err_offset;
866 u32 key;
867 };
868
869 enum ib_mr_status_check {
870 IB_MR_CHECK_SIG_STATUS = 1,
871 };
872
873 /**
874 * struct ib_mr_status - Memory region status container
875 *
876 * @fail_status: Bitmask of MR checks status. For each
877 * failed check a corresponding status bit is set.
878 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
879 * failure.
880 */
881 struct ib_mr_status {
882 u32 fail_status;
883 struct ib_sig_err sig_err;
884 };
885
886 /**
887 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
888 * enum.
889 * @mult: multiple to convert.
890 */
891 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
892
893 enum rdma_ah_attr_type {
894 RDMA_AH_ATTR_TYPE_UNDEFINED,
895 RDMA_AH_ATTR_TYPE_IB,
896 RDMA_AH_ATTR_TYPE_ROCE,
897 RDMA_AH_ATTR_TYPE_OPA,
898 };
899
900 struct ib_ah_attr {
901 u16 dlid;
902 u8 src_path_bits;
903 };
904
905 struct roce_ah_attr {
906 u8 dmac[ETH_ALEN];
907 };
908
909 struct opa_ah_attr {
910 u32 dlid;
911 u8 src_path_bits;
912 bool make_grd;
913 };
914
915 struct rdma_ah_attr {
916 struct ib_global_route grh;
917 u8 sl;
918 u8 static_rate;
919 u8 port_num;
920 u8 ah_flags;
921 enum rdma_ah_attr_type type;
922 union {
923 struct ib_ah_attr ib;
924 struct roce_ah_attr roce;
925 struct opa_ah_attr opa;
926 };
927 };
928
929 enum ib_wc_status {
930 IB_WC_SUCCESS,
931 IB_WC_LOC_LEN_ERR,
932 IB_WC_LOC_QP_OP_ERR,
933 IB_WC_LOC_EEC_OP_ERR,
934 IB_WC_LOC_PROT_ERR,
935 IB_WC_WR_FLUSH_ERR,
936 IB_WC_MW_BIND_ERR,
937 IB_WC_BAD_RESP_ERR,
938 IB_WC_LOC_ACCESS_ERR,
939 IB_WC_REM_INV_REQ_ERR,
940 IB_WC_REM_ACCESS_ERR,
941 IB_WC_REM_OP_ERR,
942 IB_WC_RETRY_EXC_ERR,
943 IB_WC_RNR_RETRY_EXC_ERR,
944 IB_WC_LOC_RDD_VIOL_ERR,
945 IB_WC_REM_INV_RD_REQ_ERR,
946 IB_WC_REM_ABORT_ERR,
947 IB_WC_INV_EECN_ERR,
948 IB_WC_INV_EEC_STATE_ERR,
949 IB_WC_FATAL_ERR,
950 IB_WC_RESP_TIMEOUT_ERR,
951 IB_WC_GENERAL_ERR
952 };
953
954 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
955
956 enum ib_wc_opcode {
957 IB_WC_SEND,
958 IB_WC_RDMA_WRITE,
959 IB_WC_RDMA_READ,
960 IB_WC_COMP_SWAP,
961 IB_WC_FETCH_ADD,
962 IB_WC_LSO,
963 IB_WC_LOCAL_INV,
964 IB_WC_REG_MR,
965 IB_WC_MASKED_COMP_SWAP,
966 IB_WC_MASKED_FETCH_ADD,
967 /*
968 * Set value of IB_WC_RECV so consumers can test if a completion is a
969 * receive by testing (opcode & IB_WC_RECV).
970 */
971 IB_WC_RECV = 1 << 7,
972 IB_WC_RECV_RDMA_WITH_IMM
973 };
974
975 enum ib_wc_flags {
976 IB_WC_GRH = 1,
977 IB_WC_WITH_IMM = (1<<1),
978 IB_WC_WITH_INVALIDATE = (1<<2),
979 IB_WC_IP_CSUM_OK = (1<<3),
980 IB_WC_WITH_SMAC = (1<<4),
981 IB_WC_WITH_VLAN = (1<<5),
982 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
983 };
984
985 struct ib_wc {
986 union {
987 u64 wr_id;
988 struct ib_cqe *wr_cqe;
989 };
990 enum ib_wc_status status;
991 enum ib_wc_opcode opcode;
992 u32 vendor_err;
993 u32 byte_len;
994 struct ib_qp *qp;
995 union {
996 __be32 imm_data;
997 u32 invalidate_rkey;
998 } ex;
999 u32 src_qp;
1000 u32 slid;
1001 int wc_flags;
1002 u16 pkey_index;
1003 u8 sl;
1004 u8 dlid_path_bits;
1005 u8 port_num; /* valid only for DR SMPs on switches */
1006 u8 smac[ETH_ALEN];
1007 u16 vlan_id;
1008 u8 network_hdr_type;
1009 };
1010
1011 enum ib_cq_notify_flags {
1012 IB_CQ_SOLICITED = 1 << 0,
1013 IB_CQ_NEXT_COMP = 1 << 1,
1014 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1015 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1016 };
1017
1018 enum ib_srq_type {
1019 IB_SRQT_BASIC,
1020 IB_SRQT_XRC,
1021 IB_SRQT_TM,
1022 };
1023
1024 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1025 {
1026 return srq_type == IB_SRQT_XRC ||
1027 srq_type == IB_SRQT_TM;
1028 }
1029
1030 enum ib_srq_attr_mask {
1031 IB_SRQ_MAX_WR = 1 << 0,
1032 IB_SRQ_LIMIT = 1 << 1,
1033 };
1034
1035 struct ib_srq_attr {
1036 u32 max_wr;
1037 u32 max_sge;
1038 u32 srq_limit;
1039 };
1040
1041 struct ib_srq_init_attr {
1042 void (*event_handler)(struct ib_event *, void *);
1043 void *srq_context;
1044 struct ib_srq_attr attr;
1045 enum ib_srq_type srq_type;
1046
1047 struct {
1048 struct ib_cq *cq;
1049 union {
1050 struct {
1051 struct ib_xrcd *xrcd;
1052 } xrc;
1053
1054 struct {
1055 u32 max_num_tags;
1056 } tag_matching;
1057 };
1058 } ext;
1059 };
1060
1061 struct ib_qp_cap {
1062 u32 max_send_wr;
1063 u32 max_recv_wr;
1064 u32 max_send_sge;
1065 u32 max_recv_sge;
1066 u32 max_inline_data;
1067
1068 /*
1069 * Maximum number of rdma_rw_ctx structures in flight at a time.
1070 * ib_create_qp() will calculate the right number of needed WRs
1071 * and MRs based on this.
1072 */
1073 u32 max_rdma_ctxs;
1074 };
1075
1076 enum ib_sig_type {
1077 IB_SIGNAL_ALL_WR,
1078 IB_SIGNAL_REQ_WR
1079 };
1080
1081 enum ib_qp_type {
1082 /*
1083 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1084 * here (and in that order) since the MAD layer uses them as
1085 * indices into a 2-entry table.
1086 */
1087 IB_QPT_SMI,
1088 IB_QPT_GSI,
1089
1090 IB_QPT_RC,
1091 IB_QPT_UC,
1092 IB_QPT_UD,
1093 IB_QPT_RAW_IPV6,
1094 IB_QPT_RAW_ETHERTYPE,
1095 IB_QPT_RAW_PACKET = 8,
1096 IB_QPT_XRC_INI = 9,
1097 IB_QPT_XRC_TGT,
1098 IB_QPT_MAX,
1099 IB_QPT_DRIVER = 0xFF,
1100 /* Reserve a range for qp types internal to the low level driver.
1101 * These qp types will not be visible at the IB core layer, so the
1102 * IB_QPT_MAX usages should not be affected in the core layer
1103 */
1104 IB_QPT_RESERVED1 = 0x1000,
1105 IB_QPT_RESERVED2,
1106 IB_QPT_RESERVED3,
1107 IB_QPT_RESERVED4,
1108 IB_QPT_RESERVED5,
1109 IB_QPT_RESERVED6,
1110 IB_QPT_RESERVED7,
1111 IB_QPT_RESERVED8,
1112 IB_QPT_RESERVED9,
1113 IB_QPT_RESERVED10,
1114 };
1115
1116 enum ib_qp_create_flags {
1117 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1118 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
1119 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1120 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1121 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1122 IB_QP_CREATE_NETIF_QP = 1 << 5,
1123 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
1124 /* FREE = 1 << 7, */
1125 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
1126 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
1127 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1128 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
1129 /* reserve bits 26-31 for low level drivers' internal use */
1130 IB_QP_CREATE_RESERVED_START = 1 << 26,
1131 IB_QP_CREATE_RESERVED_END = 1 << 31,
1132 };
1133
1134 /*
1135 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1136 * callback to destroy the passed in QP.
1137 */
1138
1139 struct ib_qp_init_attr {
1140 void (*event_handler)(struct ib_event *, void *);
1141 void *qp_context;
1142 struct ib_cq *send_cq;
1143 struct ib_cq *recv_cq;
1144 struct ib_srq *srq;
1145 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1146 struct ib_qp_cap cap;
1147 enum ib_sig_type sq_sig_type;
1148 enum ib_qp_type qp_type;
1149 enum ib_qp_create_flags create_flags;
1150
1151 /*
1152 * Only needed for special QP types, or when using the RW API.
1153 */
1154 u8 port_num;
1155 struct ib_rwq_ind_table *rwq_ind_tbl;
1156 u32 source_qpn;
1157 };
1158
1159 struct ib_qp_open_attr {
1160 void (*event_handler)(struct ib_event *, void *);
1161 void *qp_context;
1162 u32 qp_num;
1163 enum ib_qp_type qp_type;
1164 };
1165
1166 enum ib_rnr_timeout {
1167 IB_RNR_TIMER_655_36 = 0,
1168 IB_RNR_TIMER_000_01 = 1,
1169 IB_RNR_TIMER_000_02 = 2,
1170 IB_RNR_TIMER_000_03 = 3,
1171 IB_RNR_TIMER_000_04 = 4,
1172 IB_RNR_TIMER_000_06 = 5,
1173 IB_RNR_TIMER_000_08 = 6,
1174 IB_RNR_TIMER_000_12 = 7,
1175 IB_RNR_TIMER_000_16 = 8,
1176 IB_RNR_TIMER_000_24 = 9,
1177 IB_RNR_TIMER_000_32 = 10,
1178 IB_RNR_TIMER_000_48 = 11,
1179 IB_RNR_TIMER_000_64 = 12,
1180 IB_RNR_TIMER_000_96 = 13,
1181 IB_RNR_TIMER_001_28 = 14,
1182 IB_RNR_TIMER_001_92 = 15,
1183 IB_RNR_TIMER_002_56 = 16,
1184 IB_RNR_TIMER_003_84 = 17,
1185 IB_RNR_TIMER_005_12 = 18,
1186 IB_RNR_TIMER_007_68 = 19,
1187 IB_RNR_TIMER_010_24 = 20,
1188 IB_RNR_TIMER_015_36 = 21,
1189 IB_RNR_TIMER_020_48 = 22,
1190 IB_RNR_TIMER_030_72 = 23,
1191 IB_RNR_TIMER_040_96 = 24,
1192 IB_RNR_TIMER_061_44 = 25,
1193 IB_RNR_TIMER_081_92 = 26,
1194 IB_RNR_TIMER_122_88 = 27,
1195 IB_RNR_TIMER_163_84 = 28,
1196 IB_RNR_TIMER_245_76 = 29,
1197 IB_RNR_TIMER_327_68 = 30,
1198 IB_RNR_TIMER_491_52 = 31
1199 };
1200
1201 enum ib_qp_attr_mask {
1202 IB_QP_STATE = 1,
1203 IB_QP_CUR_STATE = (1<<1),
1204 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1205 IB_QP_ACCESS_FLAGS = (1<<3),
1206 IB_QP_PKEY_INDEX = (1<<4),
1207 IB_QP_PORT = (1<<5),
1208 IB_QP_QKEY = (1<<6),
1209 IB_QP_AV = (1<<7),
1210 IB_QP_PATH_MTU = (1<<8),
1211 IB_QP_TIMEOUT = (1<<9),
1212 IB_QP_RETRY_CNT = (1<<10),
1213 IB_QP_RNR_RETRY = (1<<11),
1214 IB_QP_RQ_PSN = (1<<12),
1215 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1216 IB_QP_ALT_PATH = (1<<14),
1217 IB_QP_MIN_RNR_TIMER = (1<<15),
1218 IB_QP_SQ_PSN = (1<<16),
1219 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1220 IB_QP_PATH_MIG_STATE = (1<<18),
1221 IB_QP_CAP = (1<<19),
1222 IB_QP_DEST_QPN = (1<<20),
1223 IB_QP_RESERVED1 = (1<<21),
1224 IB_QP_RESERVED2 = (1<<22),
1225 IB_QP_RESERVED3 = (1<<23),
1226 IB_QP_RESERVED4 = (1<<24),
1227 IB_QP_RATE_LIMIT = (1<<25),
1228 };
1229
1230 enum ib_qp_state {
1231 IB_QPS_RESET,
1232 IB_QPS_INIT,
1233 IB_QPS_RTR,
1234 IB_QPS_RTS,
1235 IB_QPS_SQD,
1236 IB_QPS_SQE,
1237 IB_QPS_ERR
1238 };
1239
1240 enum ib_mig_state {
1241 IB_MIG_MIGRATED,
1242 IB_MIG_REARM,
1243 IB_MIG_ARMED
1244 };
1245
1246 enum ib_mw_type {
1247 IB_MW_TYPE_1 = 1,
1248 IB_MW_TYPE_2 = 2
1249 };
1250
1251 struct ib_qp_attr {
1252 enum ib_qp_state qp_state;
1253 enum ib_qp_state cur_qp_state;
1254 enum ib_mtu path_mtu;
1255 enum ib_mig_state path_mig_state;
1256 u32 qkey;
1257 u32 rq_psn;
1258 u32 sq_psn;
1259 u32 dest_qp_num;
1260 int qp_access_flags;
1261 struct ib_qp_cap cap;
1262 struct rdma_ah_attr ah_attr;
1263 struct rdma_ah_attr alt_ah_attr;
1264 u16 pkey_index;
1265 u16 alt_pkey_index;
1266 u8 en_sqd_async_notify;
1267 u8 sq_draining;
1268 u8 max_rd_atomic;
1269 u8 max_dest_rd_atomic;
1270 u8 min_rnr_timer;
1271 u8 port_num;
1272 u8 timeout;
1273 u8 retry_cnt;
1274 u8 rnr_retry;
1275 u8 alt_port_num;
1276 u8 alt_timeout;
1277 u32 rate_limit;
1278 };
1279
1280 enum ib_wr_opcode {
1281 IB_WR_RDMA_WRITE,
1282 IB_WR_RDMA_WRITE_WITH_IMM,
1283 IB_WR_SEND,
1284 IB_WR_SEND_WITH_IMM,
1285 IB_WR_RDMA_READ,
1286 IB_WR_ATOMIC_CMP_AND_SWP,
1287 IB_WR_ATOMIC_FETCH_AND_ADD,
1288 IB_WR_LSO,
1289 IB_WR_SEND_WITH_INV,
1290 IB_WR_RDMA_READ_WITH_INV,
1291 IB_WR_LOCAL_INV,
1292 IB_WR_REG_MR,
1293 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1294 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1295 IB_WR_REG_SIG_MR,
1296 /* reserve values for low level drivers' internal use.
1297 * These values will not be used at all in the ib core layer.
1298 */
1299 IB_WR_RESERVED1 = 0xf0,
1300 IB_WR_RESERVED2,
1301 IB_WR_RESERVED3,
1302 IB_WR_RESERVED4,
1303 IB_WR_RESERVED5,
1304 IB_WR_RESERVED6,
1305 IB_WR_RESERVED7,
1306 IB_WR_RESERVED8,
1307 IB_WR_RESERVED9,
1308 IB_WR_RESERVED10,
1309 };
1310
1311 enum ib_send_flags {
1312 IB_SEND_FENCE = 1,
1313 IB_SEND_SIGNALED = (1<<1),
1314 IB_SEND_SOLICITED = (1<<2),
1315 IB_SEND_INLINE = (1<<3),
1316 IB_SEND_IP_CSUM = (1<<4),
1317
1318 /* reserve bits 26-31 for low level drivers' internal use */
1319 IB_SEND_RESERVED_START = (1 << 26),
1320 IB_SEND_RESERVED_END = (1 << 31),
1321 };
1322
1323 struct ib_sge {
1324 u64 addr;
1325 u32 length;
1326 u32 lkey;
1327 };
1328
1329 struct ib_cqe {
1330 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1331 };
1332
1333 struct ib_send_wr {
1334 struct ib_send_wr *next;
1335 union {
1336 u64 wr_id;
1337 struct ib_cqe *wr_cqe;
1338 };
1339 struct ib_sge *sg_list;
1340 int num_sge;
1341 enum ib_wr_opcode opcode;
1342 int send_flags;
1343 union {
1344 __be32 imm_data;
1345 u32 invalidate_rkey;
1346 } ex;
1347 };
1348
1349 struct ib_rdma_wr {
1350 struct ib_send_wr wr;
1351 u64 remote_addr;
1352 u32 rkey;
1353 };
1354
1355 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1356 {
1357 return container_of(wr, struct ib_rdma_wr, wr);
1358 }
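/*
 * Hedged usage sketch, not part of the original header: a ULP posts an
 * ib_rdma_wr through its embedded ib_send_wr, and a driver recovers the
 * containing structure with rdma_wr(). The sge, remote address and rkey
 * below are hypothetical.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr.opcode	= IB_WR_RDMA_WRITE,
 *		.wr.sg_list	= &sge,
 *		.wr.num_sge	= 1,
 *		.wr.send_flags	= IB_SEND_SIGNALED,
 *		.remote_addr	= remote_addr,
 *		.rkey		= rkey,
 *	};
 *
 *	In a driver's post_send path:
 *		const struct ib_rdma_wr *rdma = rdma_wr(send_wr);
 */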
1359
1360 struct ib_atomic_wr {
1361 struct ib_send_wr wr;
1362 u64 remote_addr;
1363 u64 compare_add;
1364 u64 swap;
1365 u64 compare_add_mask;
1366 u64 swap_mask;
1367 u32 rkey;
1368 };
1369
1370 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1371 {
1372 return container_of(wr, struct ib_atomic_wr, wr);
1373 }
1374
1375 struct ib_ud_wr {
1376 struct ib_send_wr wr;
1377 struct ib_ah *ah;
1378 void *header;
1379 int hlen;
1380 int mss;
1381 u32 remote_qpn;
1382 u32 remote_qkey;
1383 u16 pkey_index; /* valid for GSI only */
1384 u8 port_num; /* valid for DR SMPs on switch only */
1385 };
1386
1387 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1388 {
1389 return container_of(wr, struct ib_ud_wr, wr);
1390 }
1391
1392 struct ib_reg_wr {
1393 struct ib_send_wr wr;
1394 struct ib_mr *mr;
1395 u32 key;
1396 int access;
1397 };
1398
1399 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1400 {
1401 return container_of(wr, struct ib_reg_wr, wr);
1402 }
1403
1404 struct ib_sig_handover_wr {
1405 struct ib_send_wr wr;
1406 struct ib_sig_attrs *sig_attrs;
1407 struct ib_mr *sig_mr;
1408 int access_flags;
1409 struct ib_sge *prot;
1410 };
1411
1412 static inline const struct ib_sig_handover_wr *
1413 sig_handover_wr(const struct ib_send_wr *wr)
1414 {
1415 return container_of(wr, struct ib_sig_handover_wr, wr);
1416 }
1417
1418 struct ib_recv_wr {
1419 struct ib_recv_wr *next;
1420 union {
1421 u64 wr_id;
1422 struct ib_cqe *wr_cqe;
1423 };
1424 struct ib_sge *sg_list;
1425 int num_sge;
1426 };
1427
1428 enum ib_access_flags {
1429 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1430 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1431 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1432 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1433 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1434 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1435 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1436 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1437
1438 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1439 };
1440
1441 /*
1442 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1443 * are hidden here instead of a uapi header!
1444 */
1445 enum ib_mr_rereg_flags {
1446 IB_MR_REREG_TRANS = 1,
1447 IB_MR_REREG_PD = (1<<1),
1448 IB_MR_REREG_ACCESS = (1<<2),
1449 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1450 };
1451
1452 struct ib_fmr_attr {
1453 int max_pages;
1454 int max_maps;
1455 u8 page_shift;
1456 };
1457
1458 struct ib_umem;
1459
1460 enum rdma_remove_reason {
1461 /*
1462 * Userspace requested uobject deletion or an initial attempt
1463 * to remove the uobject via cleanup. The call could fail
1464 */
1465 RDMA_REMOVE_DESTROY,
1466 /* Context deletion. This call should delete the actual object itself */
1467 RDMA_REMOVE_CLOSE,
1468 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1469 RDMA_REMOVE_DRIVER_REMOVE,
1470 /* uobj is being cleaned-up before being committed */
1471 RDMA_REMOVE_ABORT,
1472 };
1473
1474 struct ib_rdmacg_object {
1475 #ifdef CONFIG_CGROUP_RDMA
1476 struct rdma_cgroup *cg; /* owner rdma cgroup */
1477 #endif
1478 };
1479
1480 struct ib_ucontext {
1481 struct ib_device *device;
1482 struct ib_uverbs_file *ufile;
1483 /*
1484 * 'closing' can be read by the driver only during a destroy callback;
1485 * it is set when we are closing the file descriptor and indicates
1486 * that mm_sem may be locked.
1487 */
1488 int closing;
1489
1490 bool cleanup_retryable;
1491
1492 struct pid *tgid;
1493 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1494 struct rb_root_cached umem_tree;
1495 /*
1496 * Protects .umem_tree, as well as odp_mrs_count and
1497 * mmu notifier registration.
1498 */
1499 struct rw_semaphore umem_rwsem;
1500 void (*invalidate_range)(struct ib_umem *umem,
1501 unsigned long start, unsigned long end);
1502
1503 struct mmu_notifier mn;
1504 atomic_t notifier_count;
1505 /* A list of umems that don't have private mmu notifier counters yet. */
1506 struct list_head no_private_counters;
1507 int odp_mrs_count;
1508 #endif
1509
1510 struct ib_rdmacg_object cg_obj;
1511 };
1512
1513 struct ib_uobject {
1514 u64 user_handle; /* handle given to us by userspace */
1515 /* ufile & ucontext owning this object */
1516 struct ib_uverbs_file *ufile;
1517 /* FIXME, save memory: ufile->context == context */
1518 struct ib_ucontext *context; /* associated user context */
1519 void *object; /* containing object */
1520 struct list_head list; /* link to context's list */
1521 struct ib_rdmacg_object cg_obj; /* rdmacg object */
1522 int id; /* index into kernel idr */
1523 struct kref ref;
1524 atomic_t usecnt; /* protects exclusive access */
1525 struct rcu_head rcu; /* kfree_rcu() overhead */
1526
1527 const struct uverbs_api_object *uapi_object;
1528 };
1529
1530 struct ib_udata {
1531 const void __user *inbuf;
1532 void __user *outbuf;
1533 size_t inlen;
1534 size_t outlen;
1535 };
1536
1537 struct ib_pd {
1538 u32 local_dma_lkey;
1539 u32 flags;
1540 struct ib_device *device;
1541 struct ib_uobject *uobject;
1542 atomic_t usecnt; /* count all resources */
1543
1544 u32 unsafe_global_rkey;
1545
1546 /*
1547 * Implementation details of the RDMA core, don't use in drivers:
1548 */
1549 struct ib_mr *__internal_mr;
1550 struct rdma_restrack_entry res;
1551 };
1552
1553 struct ib_xrcd {
1554 struct ib_device *device;
1555 atomic_t usecnt; /* count all exposed resources */
1556 struct inode *inode;
1557
1558 struct mutex tgt_qp_mutex;
1559 struct list_head tgt_qp_list;
1560 };
1561
1562 struct ib_ah {
1563 struct ib_device *device;
1564 struct ib_pd *pd;
1565 struct ib_uobject *uobject;
1566 const struct ib_gid_attr *sgid_attr;
1567 enum rdma_ah_attr_type type;
1568 };
1569
1570 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1571
1572 enum ib_poll_context {
1573 IB_POLL_DIRECT, /* caller context, no hw completions */
1574 IB_POLL_SOFTIRQ, /* poll from softirq context */
1575 IB_POLL_WORKQUEUE, /* poll from workqueue */
1576 };
1577
1578 struct ib_cq {
1579 struct ib_device *device;
1580 struct ib_uobject *uobject;
1581 ib_comp_handler comp_handler;
1582 void (*event_handler)(struct ib_event *, void *);
1583 void *cq_context;
1584 int cqe;
1585 atomic_t usecnt; /* count number of work queues */
1586 enum ib_poll_context poll_ctx;
1587 struct ib_wc *wc;
1588 union {
1589 struct irq_poll iop;
1590 struct work_struct work;
1591 };
1592 /*
1593 * Implementation details of the RDMA core, don't use in drivers:
1594 */
1595 struct rdma_restrack_entry res;
1596 };
1597
1598 struct ib_srq {
1599 struct ib_device *device;
1600 struct ib_pd *pd;
1601 struct ib_uobject *uobject;
1602 void (*event_handler)(struct ib_event *, void *);
1603 void *srq_context;
1604 enum ib_srq_type srq_type;
1605 atomic_t usecnt;
1606
1607 struct {
1608 struct ib_cq *cq;
1609 union {
1610 struct {
1611 struct ib_xrcd *xrcd;
1612 u32 srq_num;
1613 } xrc;
1614 };
1615 } ext;
1616 };
1617
1618 enum ib_raw_packet_caps {
1619 /* Stripping the cvlan from an incoming packet and reporting it in the
1620 * matching work completion is supported.
1621 */
1622 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1623 /* Scattering the FCS field of an incoming packet to host memory is supported.
1624 */
1625 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1626 /* Checksum offloads are supported (for both send and receive). */
1627 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
1628 /* When a packet is received for an RQ with no receive WQEs, the
1629 * packet processing is delayed.
1630 */
1631 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
1632 };
1633
1634 enum ib_wq_type {
1635 IB_WQT_RQ
1636 };
1637
1638 enum ib_wq_state {
1639 IB_WQS_RESET,
1640 IB_WQS_RDY,
1641 IB_WQS_ERR
1642 };
1643
1644 struct ib_wq {
1645 struct ib_device *device;
1646 struct ib_uobject *uobject;
1647 void *wq_context;
1648 void (*event_handler)(struct ib_event *, void *);
1649 struct ib_pd *pd;
1650 struct ib_cq *cq;
1651 u32 wq_num;
1652 enum ib_wq_state state;
1653 enum ib_wq_type wq_type;
1654 atomic_t usecnt;
1655 };
1656
1657 enum ib_wq_flags {
1658 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
1659 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
1660 IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
1661 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1662 };
1663
1664 struct ib_wq_init_attr {
1665 void *wq_context;
1666 enum ib_wq_type wq_type;
1667 u32 max_wr;
1668 u32 max_sge;
1669 struct ib_cq *cq;
1670 void (*event_handler)(struct ib_event *, void *);
1671 u32 create_flags; /* Use enum ib_wq_flags */
1672 };
1673
1674 enum ib_wq_attr_mask {
1675 IB_WQ_STATE = 1 << 0,
1676 IB_WQ_CUR_STATE = 1 << 1,
1677 IB_WQ_FLAGS = 1 << 2,
1678 };
1679
1680 struct ib_wq_attr {
1681 enum ib_wq_state wq_state;
1682 enum ib_wq_state curr_wq_state;
1683 u32 flags; /* Use enum ib_wq_flags */
1684 u32 flags_mask; /* Use enum ib_wq_flags */
1685 };
1686
1687 struct ib_rwq_ind_table {
1688 struct ib_device *device;
1689 struct ib_uobject *uobject;
1690 atomic_t usecnt;
1691 u32 ind_tbl_num;
1692 u32 log_ind_tbl_size;
1693 struct ib_wq **ind_tbl;
1694 };
1695
1696 struct ib_rwq_ind_table_init_attr {
1697 u32 log_ind_tbl_size;
1698 /* Each entry is a pointer to a Receive Work Queue */
1699 struct ib_wq **ind_tbl;
1700 };
1701
1702 enum port_pkey_state {
1703 IB_PORT_PKEY_NOT_VALID = 0,
1704 IB_PORT_PKEY_VALID = 1,
1705 IB_PORT_PKEY_LISTED = 2,
1706 };
1707
1708 struct ib_qp_security;
1709
1710 struct ib_port_pkey {
1711 enum port_pkey_state state;
1712 u16 pkey_index;
1713 u8 port_num;
1714 struct list_head qp_list;
1715 struct list_head to_error_list;
1716 struct ib_qp_security *sec;
1717 };
1718
1719 struct ib_ports_pkeys {
1720 struct ib_port_pkey main;
1721 struct ib_port_pkey alt;
1722 };
1723
1724 struct ib_qp_security {
1725 struct ib_qp *qp;
1726 struct ib_device *dev;
1727 /* Hold this mutex when changing port and pkey settings. */
1728 struct mutex mutex;
1729 struct ib_ports_pkeys *ports_pkeys;
1730 /* A list of all open shared QP handles. Required to enforce security
1731 * properly for all users of a shared QP.
1732 */
1733 struct list_head shared_qp_list;
1734 void *security;
1735 bool destroying;
1736 atomic_t error_list_count;
1737 struct completion error_complete;
1738 int error_comps_pending;
1739 };
1740
1741 /*
1742 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1743 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1744 */
1745 struct ib_qp {
1746 struct ib_device *device;
1747 struct ib_pd *pd;
1748 struct ib_cq *send_cq;
1749 struct ib_cq *recv_cq;
1750 spinlock_t mr_lock;
1751 int mrs_used;
1752 struct list_head rdma_mrs;
1753 struct list_head sig_mrs;
1754 struct ib_srq *srq;
1755 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1756 struct list_head xrcd_list;
1757
1758 /* count times opened, mcast attaches, flow attaches */
1759 atomic_t usecnt;
1760 struct list_head open_list;
1761 struct ib_qp *real_qp;
1762 struct ib_uobject *uobject;
1763 void (*event_handler)(struct ib_event *, void *);
1764 void *qp_context;
1765 /* sgid_attrs associated with the AVs */
1766 const struct ib_gid_attr *av_sgid_attr;
1767 const struct ib_gid_attr *alt_path_sgid_attr;
1768 u32 qp_num;
1769 u32 max_write_sge;
1770 u32 max_read_sge;
1771 enum ib_qp_type qp_type;
1772 struct ib_rwq_ind_table *rwq_ind_tbl;
1773 struct ib_qp_security *qp_sec;
1774 u8 port;
1775
1776 /*
1777 * Implementation details of the RDMA core, don't use in drivers:
1778 */
1779 struct rdma_restrack_entry res;
1780 };
1781
1782 struct ib_dm {
1783 struct ib_device *device;
1784 u32 length;
1785 u32 flags;
1786 struct ib_uobject *uobject;
1787 atomic_t usecnt;
1788 };
1789
1790 struct ib_mr {
1791 struct ib_device *device;
1792 struct ib_pd *pd;
1793 u32 lkey;
1794 u32 rkey;
1795 u64 iova;
1796 u64 length;
1797 unsigned int page_size;
1798 bool need_inval;
1799 union {
1800 struct ib_uobject *uobject; /* user */
1801 struct list_head qp_entry; /* FR */
1802 };
1803
1804 struct ib_dm *dm;
1805
1806 /*
1807 * Implementation details of the RDMA core, don't use in drivers:
1808 */
1809 struct rdma_restrack_entry res;
1810 };
1811
1812 struct ib_mw {
1813 struct ib_device *device;
1814 struct ib_pd *pd;
1815 struct ib_uobject *uobject;
1816 u32 rkey;
1817 enum ib_mw_type type;
1818 };
1819
1820 struct ib_fmr {
1821 struct ib_device *device;
1822 struct ib_pd *pd;
1823 struct list_head list;
1824 u32 lkey;
1825 u32 rkey;
1826 };
1827
1828 /* Supported steering options */
1829 enum ib_flow_attr_type {
1830 /* steering according to rule specifications */
1831 IB_FLOW_ATTR_NORMAL = 0x0,
1832 /* default unicast and multicast rule -
1833 * receive all Eth traffic which isn't steered to any QP
1834 */
1835 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1836 /* default multicast rule -
1837 * receive all Eth multicast traffic which isn't steered to any QP
1838 */
1839 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1840 /* sniffer rule - receive all port traffic */
1841 IB_FLOW_ATTR_SNIFFER = 0x3
1842 };
1843
1844 /* Supported steering header types */
1845 enum ib_flow_spec_type {
1846 /* L2 headers*/
1847 IB_FLOW_SPEC_ETH = 0x20,
1848 IB_FLOW_SPEC_IB = 0x22,
1849 /* L3 header*/
1850 IB_FLOW_SPEC_IPV4 = 0x30,
1851 IB_FLOW_SPEC_IPV6 = 0x31,
1852 IB_FLOW_SPEC_ESP = 0x34,
1853 /* L4 headers*/
1854 IB_FLOW_SPEC_TCP = 0x40,
1855 IB_FLOW_SPEC_UDP = 0x41,
1856 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1857 IB_FLOW_SPEC_GRE = 0x51,
1858 IB_FLOW_SPEC_MPLS = 0x60,
1859 IB_FLOW_SPEC_INNER = 0x100,
1860 /* Actions */
1861 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1862 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1863 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1864 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1865 };
1866 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1867 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1868
1869 /* Flow steering rule priority is set according to its domain.
1870 * Lower domain value means higher priority.
1871 */
1872 enum ib_flow_domain {
1873 IB_FLOW_DOMAIN_USER,
1874 IB_FLOW_DOMAIN_ETHTOOL,
1875 IB_FLOW_DOMAIN_RFS,
1876 IB_FLOW_DOMAIN_NIC,
1877 IB_FLOW_DOMAIN_NUM /* Must be last */
1878 };
1879
1880 enum ib_flow_flags {
1881 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1882 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1883 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
1884 };
1885
1886 struct ib_flow_eth_filter {
1887 u8 dst_mac[6];
1888 u8 src_mac[6];
1889 __be16 ether_type;
1890 __be16 vlan_tag;
1891 /* Must be last */
1892 u8 real_sz[0];
1893 };
1894
1895 struct ib_flow_spec_eth {
1896 u32 type;
1897 u16 size;
1898 struct ib_flow_eth_filter val;
1899 struct ib_flow_eth_filter mask;
1900 };
1901
1902 struct ib_flow_ib_filter {
1903 __be16 dlid;
1904 __u8 sl;
1905 /* Must be last */
1906 u8 real_sz[0];
1907 };
1908
1909 struct ib_flow_spec_ib {
1910 u32 type;
1911 u16 size;
1912 struct ib_flow_ib_filter val;
1913 struct ib_flow_ib_filter mask;
1914 };
1915
1916 /* IPv4 header flags */
1917 enum ib_ipv4_flags {
1918 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1919 IB_IPV4_MORE_FRAG = 0x4 /* All fragmented packets except the
1920 last have this flag set */
1921 };
1922
1923 struct ib_flow_ipv4_filter {
1924 __be32 src_ip;
1925 __be32 dst_ip;
1926 u8 proto;
1927 u8 tos;
1928 u8 ttl;
1929 u8 flags;
1930 /* Must be last */
1931 u8 real_sz[0];
1932 };
1933
1934 struct ib_flow_spec_ipv4 {
1935 u32 type;
1936 u16 size;
1937 struct ib_flow_ipv4_filter val;
1938 struct ib_flow_ipv4_filter mask;
1939 };
1940
1941 struct ib_flow_ipv6_filter {
1942 u8 src_ip[16];
1943 u8 dst_ip[16];
1944 __be32 flow_label;
1945 u8 next_hdr;
1946 u8 traffic_class;
1947 u8 hop_limit;
1948 /* Must be last */
1949 u8 real_sz[0];
1950 };
1951
1952 struct ib_flow_spec_ipv6 {
1953 u32 type;
1954 u16 size;
1955 struct ib_flow_ipv6_filter val;
1956 struct ib_flow_ipv6_filter mask;
1957 };
1958
1959 struct ib_flow_tcp_udp_filter {
1960 __be16 dst_port;
1961 __be16 src_port;
1962 /* Must be last */
1963 u8 real_sz[0];
1964 };
1965
1966 struct ib_flow_spec_tcp_udp {
1967 u32 type;
1968 u16 size;
1969 struct ib_flow_tcp_udp_filter val;
1970 struct ib_flow_tcp_udp_filter mask;
1971 };
1972
1973 struct ib_flow_tunnel_filter {
1974 __be32 tunnel_id;
1975 u8 real_sz[0];
1976 };
1977
1978 /* ib_flow_spec_tunnel describes the Vxlan tunnel;
1979 * the tunnel_id from val holds the vni value
1980 */
1981 struct ib_flow_spec_tunnel {
1982 u32 type;
1983 u16 size;
1984 struct ib_flow_tunnel_filter val;
1985 struct ib_flow_tunnel_filter mask;
1986 };
1987
1988 struct ib_flow_esp_filter {
1989 __be32 spi;
1990 __be32 seq;
1991 /* Must be last */
1992 u8 real_sz[0];
1993 };
1994
1995 struct ib_flow_spec_esp {
1996 u32 type;
1997 u16 size;
1998 struct ib_flow_esp_filter val;
1999 struct ib_flow_esp_filter mask;
2000 };
2001
2002 struct ib_flow_gre_filter {
2003 __be16 c_ks_res0_ver;
2004 __be16 protocol;
2005 __be32 key;
2006 /* Must be last */
2007 u8 real_sz[0];
2008 };
2009
2010 struct ib_flow_spec_gre {
2011 u32 type;
2012 u16 size;
2013 struct ib_flow_gre_filter val;
2014 struct ib_flow_gre_filter mask;
2015 };
2016
2017 struct ib_flow_mpls_filter {
2018 __be32 tag;
2019 /* Must be last */
2020 u8 real_sz[0];
2021 };
2022
2023 struct ib_flow_spec_mpls {
2024 u32 type;
2025 u16 size;
2026 struct ib_flow_mpls_filter val;
2027 struct ib_flow_mpls_filter mask;
2028 };
2029
2030 struct ib_flow_spec_action_tag {
2031 enum ib_flow_spec_type type;
2032 u16 size;
2033 u32 tag_id;
2034 };
2035
2036 struct ib_flow_spec_action_drop {
2037 enum ib_flow_spec_type type;
2038 u16 size;
2039 };
2040
2041 struct ib_flow_spec_action_handle {
2042 enum ib_flow_spec_type type;
2043 u16 size;
2044 struct ib_flow_action *act;
2045 };
2046
2047 enum ib_counters_description {
2048 IB_COUNTER_PACKETS,
2049 IB_COUNTER_BYTES,
2050 };
2051
2052 struct ib_flow_spec_action_count {
2053 enum ib_flow_spec_type type;
2054 u16 size;
2055 struct ib_counters *counters;
2056 };
2057
2058 union ib_flow_spec {
2059 struct {
2060 u32 type;
2061 u16 size;
2062 };
2063 struct ib_flow_spec_eth eth;
2064 struct ib_flow_spec_ib ib;
2065 struct ib_flow_spec_ipv4 ipv4;
2066 struct ib_flow_spec_tcp_udp tcp_udp;
2067 struct ib_flow_spec_ipv6 ipv6;
2068 struct ib_flow_spec_tunnel tunnel;
2069 struct ib_flow_spec_esp esp;
2070 struct ib_flow_spec_gre gre;
2071 struct ib_flow_spec_mpls mpls;
2072 struct ib_flow_spec_action_tag flow_tag;
2073 struct ib_flow_spec_action_drop drop;
2074 struct ib_flow_spec_action_handle action;
2075 struct ib_flow_spec_action_count flow_count;
2076 };
2077
2078 struct ib_flow_attr {
2079 enum ib_flow_attr_type type;
2080 u16 size;
2081 u16 priority;
2082 u32 flags;
2083 u8 num_of_specs;
2084 u8 port;
2085 union ib_flow_spec flows[];
2086 };
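/*
 * Hedged sketch, not part of the original header: the specs live in the
 * trailing flexible array, so a caller allocates the attr and its specs as
 * one block and sets num_of_specs to match. The values below are
 * illustrative only.
 *
 *	struct ib_flow_attr *flow_attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	flow_attr = kzalloc(sizeof(*flow_attr) + sizeof(*eth), GFP_KERNEL);
 *	if (!flow_attr)
 *		return -ENOMEM;
 *
 *	flow_attr->type = IB_FLOW_ATTR_NORMAL;
 *	flow_attr->num_of_specs = 1;
 *	flow_attr->port = 1;
 *
 *	eth = &flow_attr->flows[0].eth;
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	memset(eth->mask.dst_mac, 0xff, ETH_ALEN);	(match on destination MAC)
 */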
2087
2088 struct ib_flow {
2089 struct ib_qp *qp;
2090 struct ib_device *device;
2091 struct ib_uobject *uobject;
2092 };
2093
2094 enum ib_flow_action_type {
2095 IB_FLOW_ACTION_UNSPECIFIED,
2096 IB_FLOW_ACTION_ESP = 1,
2097 };
2098
2099 struct ib_flow_action_attrs_esp_keymats {
2100 enum ib_uverbs_flow_action_esp_keymat protocol;
2101 union {
2102 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2103 } keymat;
2104 };
2105
2106 struct ib_flow_action_attrs_esp_replays {
2107 enum ib_uverbs_flow_action_esp_replay protocol;
2108 union {
2109 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2110 } replay;
2111 };
2112
2113 enum ib_flow_action_attrs_esp_flags {
2114 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags.
2115 * This is done in order to share the same flags between user-space and
2116 * kernel and spare an unnecessary translation.
2117 */
2118
2119 /* Kernel flags */
2120 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2121 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2122 };
2123
2124 struct ib_flow_spec_list {
2125 struct ib_flow_spec_list *next;
2126 union ib_flow_spec spec;
2127 };
2128
2129 struct ib_flow_action_attrs_esp {
2130 struct ib_flow_action_attrs_esp_keymats *keymat;
2131 struct ib_flow_action_attrs_esp_replays *replay;
2132 struct ib_flow_spec_list *encap;
2133 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2134 * Value of 0 is a valid value.
2135 */
2136 u32 esn;
2137 u32 spi;
2138 u32 seq;
2139 u32 tfc_pad;
2140 /* Use enum ib_flow_action_attrs_esp_flags */
2141 u64 flags;
2142 u64 hard_limit_pkts;
2143 };
2144
2145 struct ib_flow_action {
2146 struct ib_device *device;
2147 struct ib_uobject *uobject;
2148 enum ib_flow_action_type type;
2149 atomic_t usecnt;
2150 };
2151
2152 struct ib_mad_hdr;
2153 struct ib_grh;
2154
2155 enum ib_process_mad_flags {
2156 IB_MAD_IGNORE_MKEY = 1,
2157 IB_MAD_IGNORE_BKEY = 2,
2158 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2159 };
2160
2161 enum ib_mad_result {
2162 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2163 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2164 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2165 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2166 };
2167
2168 struct ib_port_cache {
2169 u64 subnet_prefix;
2170 struct ib_pkey_cache *pkey;
2171 struct ib_gid_table *gid;
2172 u8 lmc;
2173 enum ib_port_state port_state;
2174 };
2175
2176 struct ib_cache {
2177 rwlock_t lock;
2178 struct ib_event_handler event_handler;
2179 struct ib_port_cache *ports;
2180 };
2181
2182 struct iw_cm_verbs;
2183
2184 struct ib_port_immutable {
2185 int pkey_tbl_len;
2186 int gid_tbl_len;
2187 u32 core_cap_flags;
2188 u32 max_mad_size;
2189 };
2190
2191 /* rdma netdev type - specifies protocol type */
2192 enum rdma_netdev_t {
2193 RDMA_NETDEV_OPA_VNIC,
2194 RDMA_NETDEV_IPOIB,
2195 };
2196
2197 /**
2198 * struct rdma_netdev - rdma netdev
2199 * For cases where netstack interfacing is required.
2200 */
2201 struct rdma_netdev {
2202 void *clnt_priv;
2203 struct ib_device *hca;
2204 u8 port_num;
2205
2206 /*
2207 * cleanup function must be specified.
2208 * FIXME: This is only used for OPA_VNIC and that usage should be
2209 * removed too.
2210 */
2211 void (*free_rdma_netdev)(struct net_device *netdev);
2212
2213 /* control functions */
2214 void (*set_id)(struct net_device *netdev, int id);
2215 /* send packet */
2216 int (*send)(struct net_device *dev, struct sk_buff *skb,
2217 struct ib_ah *address, u32 dqpn);
2218 /* multicast */
2219 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2220 union ib_gid *gid, u16 mlid,
2221 int set_qkey, u32 qkey);
2222 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2223 union ib_gid *gid, u16 mlid);
2224 };
2225
2226 struct ib_port_pkey_list {
2227 /* Lock to hold while modifying the list. */
2228 spinlock_t list_lock;
2229 struct list_head pkey_list;
2230 };
2231
2232 struct ib_counters {
2233 struct ib_device *device;
2234 struct ib_uobject *uobject;
2235 /* num of objects attached */
2236 atomic_t usecnt;
2237 };
2238
2239 struct ib_counters_read_attr {
2240 u64 *counters_buff;
2241 u32 ncounters;
2242 u32 flags; /* use enum ib_read_counters_flags */
2243 };
2244
2245 struct uverbs_attr_bundle;
2246
2247 struct ib_device {
2248 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2249 struct device *dma_device;
2250
2251 char name[IB_DEVICE_NAME_MAX];
2252
2253 struct list_head event_handler_list;
2254 spinlock_t event_handler_lock;
2255
2256 spinlock_t client_data_lock;
2257 struct list_head core_list;
2258 /* Access to the client_data_list is protected by the client_data_lock
2259 * spinlock and the lists_rwsem read-write semaphore */
2260 struct list_head client_data_list;
2261
2262 struct ib_cache cache;
2263 /**
2264 * port_immutable is indexed by port number
2265 */
2266 struct ib_port_immutable *port_immutable;
2267
2268 int num_comp_vectors;
2269
2270 struct ib_port_pkey_list *port_pkey_list;
2271
2272 struct iw_cm_verbs *iwcm;
2273
2274 /**
2275 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2276 * driver initialized data. The struct is kfree()'ed by the sysfs
2277 * core when the device is removed. A lifespan of -1 in the return
2278 * struct tells the core to set a default lifespan.
2279 */
2280 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2281 u8 port_num);
2282 /**
2283 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2284 * @index - The index in the value array we wish to have updated, or
2285 * num_counters if we want all stats updated
2286 * Return codes -
2287 * < 0 - Error, no counters updated
2288 * index - Updated the single counter pointed to by index
2289 * num_counters - Updated all counters (will reset the timestamp
2290 * and prevent further calls for lifespan milliseconds)
2291 * Drivers are allowed to update all counters in lieu of just the
2292 * one given in index at their option
2293 */
2294 int (*get_hw_stats)(struct ib_device *device,
2295 struct rdma_hw_stats *stats,
2296 u8 port, int index);
2297 int (*query_device)(struct ib_device *device,
2298 struct ib_device_attr *device_attr,
2299 struct ib_udata *udata);
2300 int (*query_port)(struct ib_device *device,
2301 u8 port_num,
2302 struct ib_port_attr *port_attr);
2303 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2304 u8 port_num);
2305 /* When calling get_netdev, the HW vendor's driver should return the
2306 * net device of device @device at port @port_num or NULL if such
2307 * a net device doesn't exist. The vendor driver should call dev_hold
2308 * on this net device. The HW vendor's device driver must guarantee
2309 * that this function returns NULL before the net device has finished
2310 * NETDEV_UNREGISTER state.
2311 */
2312 struct net_device *(*get_netdev)(struct ib_device *device,
2313 u8 port_num);
2314 /* query_gid should return the GID value for @device when the link
2315 * layer of @port_num is either IB or iWARP. It is a no-op if the
2316 * @port_num port is a RoCE link layer.
2317 */
2318 int (*query_gid)(struct ib_device *device,
2319 u8 port_num, int index,
2320 union ib_gid *gid);
2321 /* When calling add_gid, the HW vendor's driver should add the gid
2322 * of the device's port at the gid index available at @attr. Meta-info of
2323 * that gid (for example, the network device related to this gid) is
2324 * available at @attr. @context allows the HW vendor driver to store
2325 * extra information together with a GID entry. The HW vendor driver may
2326 * allocate memory to contain this information and store it in @context
2327 * when a new GID entry is written to. Params are consistent until the
2328 * next call of add_gid or delete_gid. The function should return 0 on
2329 * success or error otherwise. The function could be called
2330 * concurrently for different ports. This function is only called when
2331 * roce_gid_table is used.
2332 */
2333 int (*add_gid)(const struct ib_gid_attr *attr,
2334 void **context);
2335 /* When calling del_gid, the HW vendor's driver should delete the
2336 * gid of device @device at gid index gid_index of port port_num
2337 * available in @attr.
2338 * Upon the deletion of a GID entry, the HW vendor must free any
2339 * allocated memory. The caller will clear @context afterwards.
2340 * This function is only called when roce_gid_table is used.
2341 */
2342 int (*del_gid)(const struct ib_gid_attr *attr,
2343 void **context);
2344 int (*query_pkey)(struct ib_device *device,
2345 u8 port_num, u16 index, u16 *pkey);
2346 int (*modify_device)(struct ib_device *device,
2347 int device_modify_mask,
2348 struct ib_device_modify *device_modify);
2349 int (*modify_port)(struct ib_device *device,
2350 u8 port_num, int port_modify_mask,
2351 struct ib_port_modify *port_modify);
2352 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
2353 struct ib_udata *udata);
2354 int (*dealloc_ucontext)(struct ib_ucontext *context);
2355 int (*mmap)(struct ib_ucontext *context,
2356 struct vm_area_struct *vma);
2357 struct ib_pd * (*alloc_pd)(struct ib_device *device,
2358 struct ib_ucontext *context,
2359 struct ib_udata *udata);
2360 int (*dealloc_pd)(struct ib_pd *pd);
2361 struct ib_ah * (*create_ah)(struct ib_pd *pd,
2362 struct rdma_ah_attr *ah_attr,
2363 struct ib_udata *udata);
2364 int (*modify_ah)(struct ib_ah *ah,
2365 struct rdma_ah_attr *ah_attr);
2366 int (*query_ah)(struct ib_ah *ah,
2367 struct rdma_ah_attr *ah_attr);
2368 int (*destroy_ah)(struct ib_ah *ah);
2369 struct ib_srq * (*create_srq)(struct ib_pd *pd,
2370 struct ib_srq_init_attr *srq_init_attr,
2371 struct ib_udata *udata);
2372 int (*modify_srq)(struct ib_srq *srq,
2373 struct ib_srq_attr *srq_attr,
2374 enum ib_srq_attr_mask srq_attr_mask,
2375 struct ib_udata *udata);
2376 int (*query_srq)(struct ib_srq *srq,
2377 struct ib_srq_attr *srq_attr);
2378 int (*destroy_srq)(struct ib_srq *srq);
2379 int (*post_srq_recv)(struct ib_srq *srq,
2380 const struct ib_recv_wr *recv_wr,
2381 const struct ib_recv_wr **bad_recv_wr);
2382 struct ib_qp * (*create_qp)(struct ib_pd *pd,
2383 struct ib_qp_init_attr *qp_init_attr,
2384 struct ib_udata *udata);
2385 int (*modify_qp)(struct ib_qp *qp,
2386 struct ib_qp_attr *qp_attr,
2387 int qp_attr_mask,
2388 struct ib_udata *udata);
2389 int (*query_qp)(struct ib_qp *qp,
2390 struct ib_qp_attr *qp_attr,
2391 int qp_attr_mask,
2392 struct ib_qp_init_attr *qp_init_attr);
2393 int (*destroy_qp)(struct ib_qp *qp);
2394 int (*post_send)(struct ib_qp *qp,
2395 const struct ib_send_wr *send_wr,
2396 const struct ib_send_wr **bad_send_wr);
2397 int (*post_recv)(struct ib_qp *qp,
2398 const struct ib_recv_wr *recv_wr,
2399 const struct ib_recv_wr **bad_recv_wr);
2400 struct ib_cq * (*create_cq)(struct ib_device *device,
2401 const struct ib_cq_init_attr *attr,
2402 struct ib_ucontext *context,
2403 struct ib_udata *udata);
2404 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2405 u16 cq_period);
2406 int (*destroy_cq)(struct ib_cq *cq);
2407 int (*resize_cq)(struct ib_cq *cq, int cqe,
2408 struct ib_udata *udata);
2409 int (*poll_cq)(struct ib_cq *cq, int num_entries,
2410 struct ib_wc *wc);
2411 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2412 int (*req_notify_cq)(struct ib_cq *cq,
2413 enum ib_cq_notify_flags flags);
2414 int (*req_ncomp_notif)(struct ib_cq *cq,
2415 int wc_cnt);
2416 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
2417 int mr_access_flags);
2418 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
2419 u64 start, u64 length,
2420 u64 virt_addr,
2421 int mr_access_flags,
2422 struct ib_udata *udata);
2423 int (*rereg_user_mr)(struct ib_mr *mr,
2424 int flags,
2425 u64 start, u64 length,
2426 u64 virt_addr,
2427 int mr_access_flags,
2428 struct ib_pd *pd,
2429 struct ib_udata *udata);
2430 int (*dereg_mr)(struct ib_mr *mr);
2431 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
2432 enum ib_mr_type mr_type,
2433 u32 max_num_sg);
2434 int (*map_mr_sg)(struct ib_mr *mr,
2435 struct scatterlist *sg,
2436 int sg_nents,
2437 unsigned int *sg_offset);
2438 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
2439 enum ib_mw_type type,
2440 struct ib_udata *udata);
2441 int (*dealloc_mw)(struct ib_mw *mw);
2442 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
2443 int mr_access_flags,
2444 struct ib_fmr_attr *fmr_attr);
2445 int (*map_phys_fmr)(struct ib_fmr *fmr,
2446 u64 *page_list, int list_len,
2447 u64 iova);
2448 int (*unmap_fmr)(struct list_head *fmr_list);
2449 int (*dealloc_fmr)(struct ib_fmr *fmr);
2450 int (*attach_mcast)(struct ib_qp *qp,
2451 union ib_gid *gid,
2452 u16 lid);
2453 int (*detach_mcast)(struct ib_qp *qp,
2454 union ib_gid *gid,
2455 u16 lid);
2456 int (*process_mad)(struct ib_device *device,
2457 int process_mad_flags,
2458 u8 port_num,
2459 const struct ib_wc *in_wc,
2460 const struct ib_grh *in_grh,
2461 const struct ib_mad_hdr *in_mad,
2462 size_t in_mad_size,
2463 struct ib_mad_hdr *out_mad,
2464 size_t *out_mad_size,
2465 u16 *out_mad_pkey_index);
2466 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
2467 struct ib_ucontext *ucontext,
2468 struct ib_udata *udata);
2469 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2470 struct ib_flow * (*create_flow)(struct ib_qp *qp,
2471 struct ib_flow_attr
2472 *flow_attr,
2473 int domain,
2474 struct ib_udata *udata);
2475 int (*destroy_flow)(struct ib_flow *flow_id);
2476 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2477 struct ib_mr_status *mr_status);
2478 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2479 void (*drain_rq)(struct ib_qp *qp);
2480 void (*drain_sq)(struct ib_qp *qp);
2481 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2482 int state);
2483 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2484 struct ifla_vf_info *ivf);
2485 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2486 struct ifla_vf_stats *stats);
2487 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2488 int type);
2489 struct ib_wq * (*create_wq)(struct ib_pd *pd,
2490 struct ib_wq_init_attr *init_attr,
2491 struct ib_udata *udata);
2492 int (*destroy_wq)(struct ib_wq *wq);
2493 int (*modify_wq)(struct ib_wq *wq,
2494 struct ib_wq_attr *attr,
2495 u32 wq_attr_mask,
2496 struct ib_udata *udata);
2497 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
2498 struct ib_rwq_ind_table_init_attr *init_attr,
2499 struct ib_udata *udata);
2500 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2501 struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
2502 const struct ib_flow_action_attrs_esp *attr,
2503 struct uverbs_attr_bundle *attrs);
2504 int (*destroy_flow_action)(struct ib_flow_action *action);
2505 int (*modify_flow_action_esp)(struct ib_flow_action *action,
2506 const struct ib_flow_action_attrs_esp *attr,
2507 struct uverbs_attr_bundle *attrs);
2508 struct ib_dm * (*alloc_dm)(struct ib_device *device,
2509 struct ib_ucontext *context,
2510 struct ib_dm_alloc_attr *attr,
2511 struct uverbs_attr_bundle *attrs);
2512 int (*dealloc_dm)(struct ib_dm *dm);
2513 struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2514 struct ib_dm_mr_attr *attr,
2515 struct uverbs_attr_bundle *attrs);
2516 struct ib_counters * (*create_counters)(struct ib_device *device,
2517 struct uverbs_attr_bundle *attrs);
2518 int (*destroy_counters)(struct ib_counters *counters);
2519 int (*read_counters)(struct ib_counters *counters,
2520 struct ib_counters_read_attr *counters_read_attr,
2521 struct uverbs_attr_bundle *attrs);
2522
2523 /**
2524 * rdma netdev operation
2525 *
2526 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2527 * doesn't support the specified rdma netdev type.
2528 */
2529 struct net_device *(*alloc_rdma_netdev)(
2530 struct ib_device *device,
2531 u8 port_num,
2532 enum rdma_netdev_t type,
2533 const char *name,
2534 unsigned char name_assign_type,
2535 void (*setup)(struct net_device *));
2536
2537 struct module *owner;
2538 struct device dev;
2539 struct kobject *ports_parent;
2540 struct list_head port_list;
2541
2542 enum {
2543 IB_DEV_UNINITIALIZED,
2544 IB_DEV_REGISTERED,
2545 IB_DEV_UNREGISTERED
2546 } reg_state;
2547
2548 int uverbs_abi_ver;
2549 u64 uverbs_cmd_mask;
2550 u64 uverbs_ex_cmd_mask;
2551
2552 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2553 __be64 node_guid;
2554 u32 local_dma_lkey;
2555 u16 is_switch:1;
2556 u8 node_type;
2557 u8 phys_port_cnt;
2558 struct ib_device_attr attrs;
2559 struct attribute_group *hw_stats_ag;
2560 struct rdma_hw_stats *hw_stats;
2561
2562 #ifdef CONFIG_CGROUP_RDMA
2563 struct rdmacg_device cg_device;
2564 #endif
2565
2566 u32 index;
2567 /*
2568 * Implementation details of the RDMA core, don't use in drivers
2569 */
2570 struct rdma_restrack_root res;
2571
2572 /**
2573 * The following mandatory functions are used only at device
2574 * registration. Keep functions such as these at the end of this
2575 * structure to avoid cache line misses when accessing struct ib_device
2576 * in fast paths.
2577 */
2578 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2579 void (*get_dev_fw_str)(struct ib_device *, char *str);
2580 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2581 int comp_vector);
2582
2583 const struct uverbs_object_tree_def *const *driver_specs;
2584 enum rdma_driver_id driver_id;
2585 };
2586
2587 struct ib_client {
2588 char *name;
2589 void (*add) (struct ib_device *);
2590 void (*remove)(struct ib_device *, void *client_data);
2591
2592 /* Returns the net_dev belonging to this ib_client and matching the
2593 * given parameters.
2594 * @dev: An RDMA device that the net_dev uses for communication.
2595 * @port: A physical port number on the RDMA device.
2596 * @pkey: P_Key that the net_dev uses if applicable.
2597 * @gid: A GID that the net_dev uses to communicate.
2598 * @addr: An IP address the net_dev is configured with.
2599 * @client_data: The device's client data set by ib_set_client_data().
2600 *
2601 * An ib_client that implements a net_dev on top of RDMA devices
2602 * (such as IP over IB) should implement this callback, allowing the
2603 * rdma_cm module to find the right net_dev for a given request.
2604 *
2605 * The caller is responsible for calling dev_put on the returned
2606 * netdev. */
2607 struct net_device *(*get_net_dev_by_params)(
2608 struct ib_device *dev,
2609 u8 port,
2610 u16 pkey,
2611 const union ib_gid *gid,
2612 const struct sockaddr *addr,
2613 void *client_data);
2614 struct list_head list;
2615 };
2616
2617 struct ib_device *ib_alloc_device(size_t size);
2618 void ib_dealloc_device(struct ib_device *device);
2619
2620 void ib_get_device_fw_str(struct ib_device *device, char *str);
2621
2622 int ib_register_device(struct ib_device *device,
2623 int (*port_callback)(struct ib_device *,
2624 u8, struct kobject *));
2625 void ib_unregister_device(struct ib_device *device);
2626
2627 int ib_register_client (struct ib_client *client);
2628 void ib_unregister_client(struct ib_client *client);
2629
2630 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2631 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2632 void *data);
2633
2634 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2635 {
2636 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2637 }
2638
2639 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2640 {
2641 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2642 }
2643
2644 static inline bool ib_is_buffer_cleared(const void __user *p,
2645 size_t len)
2646 {
2647 bool ret;
2648 u8 *buf;
2649
2650 if (len > USHRT_MAX)
2651 return false;
2652
2653 buf = memdup_user(p, len);
2654 if (IS_ERR(buf))
2655 return false;
2656
2657 ret = !memchr_inv(buf, 0, len);
2658 kfree(buf);
2659 return ret;
2660 }
2661
2662 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2663 size_t offset,
2664 size_t len)
2665 {
2666 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2667 }
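
/*
 * Illustrative sketch of how a driver verb entry point might use the three
 * helpers above to parse its private command ABI: copy the known part of
 * the input, require any trailing input bytes the driver does not
 * understand to be zero, and copy back no more response than the caller
 * asked for. "struct my_cmd" and "struct my_resp" are hypothetical driver
 * structures, not part of this header.
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp = {};
 *
 *	if (udata->inlen < sizeof(cmd))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *		return -EFAULT;
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 *
 *	(fill in resp, then)
 *
 *	if (ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)))
 *		return -EFAULT;
 */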
2668
2669 /**
2670 * ib_is_destroy_retryable - Check whether the uobject destruction
2671 * is retryable.
2672 * @ret: The initial destruction return code
2673 * @why: remove reason
2674 * @uobj: The uobject that is destroyed
2675 *
2676 * This function is a helper function that IB layer and low-level drivers
2677 * can use to consider whether the destruction of the given uobject is
2678 * retry-able.
2679 * It checks the original return code; if it was not success, the destruction
2680 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2681 * the remove reason (i.e. why).
2682 * Must be called with the object locked for destroy.
2683 */
2684 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2685 struct ib_uobject *uobj)
2686 {
2687 return ret && (why == RDMA_REMOVE_DESTROY ||
2688 uobj->context->cleanup_retryable);
2689 }
2690
2691 /**
2692 * ib_destroy_usecnt - Called during destruction to check the usecnt
2693 * @usecnt: The usecnt atomic
2694 * @why: remove reason
2695 * @uobj: The uobject that is destroyed
2696 *
2697 * Non-zero usecnts will block destruction unless destruction was triggered by
2698 * a ucontext cleanup.
2699 */
2700 static inline int ib_destroy_usecnt(atomic_t *usecnt,
2701 enum rdma_remove_reason why,
2702 struct ib_uobject *uobj)
2703 {
2704 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2705 return -EBUSY;
2706 return 0;
2707 }
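
/*
 * Minimal sketch, loosely modeled on how the uverbs standard types use
 * this helper in their free handlers: refuse an explicit destroy while the
 * object is still referenced, but let a forced ucontext teardown proceed.
 * example_free_counters() is purely illustrative.
 *
 *	static int example_free_counters(struct ib_uobject *uobject,
 *					 enum rdma_remove_reason why)
 *	{
 *		struct ib_counters *counters = uobject->object;
 *		int ret;
 *
 *		ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
 *		if (ret)
 *			return ret;
 *
 *		return counters->device->destroy_counters(counters);
 *	}
 */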
2708
2709 /**
2710 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2711 * contains all required attributes and no attributes not allowed for
2712 * the given QP state transition.
2713 * @cur_state: Current QP state
2714 * @next_state: Next QP state
2715 * @type: QP type
2716 * @mask: Mask of supplied QP attributes
2717 * @ll : link layer of port
2718 *
2719 * This function is a helper function that a low-level driver's
2720 * modify_qp method can use to validate the consumer's input. It
2721 * checks that cur_state and next_state are valid QP states, that a
2722 * transition from cur_state to next_state is allowed by the IB spec,
2723 * and that the attribute mask supplied is allowed for the transition.
2724 */
2725 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2726 enum ib_qp_type type, enum ib_qp_attr_mask mask,
2727 enum rdma_link_layer ll);
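
/*
 * A rough sketch of the intended use inside a hypothetical driver's
 * modify_qp method; "drv_qp->state" stands for whatever state the driver
 * tracks itself and "port" for the port the QP is bound to.
 *
 *	enum ib_qp_state cur_state, new_state;
 *
 *	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
 *						    drv_qp->state;
 *	new_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask,
 *				rdma_port_get_link_layer(ibqp->device, port)))
 *		return -EINVAL;
 */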
2728
2729 void ib_register_event_handler(struct ib_event_handler *event_handler);
2730 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2731 void ib_dispatch_event(struct ib_event *event);
2732
2733 int ib_query_port(struct ib_device *device,
2734 u8 port_num, struct ib_port_attr *port_attr);
2735
2736 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2737 u8 port_num);
2738
2739 /**
2740 * rdma_cap_ib_switch - Check if the device is IB switch
2741 * @device: Device to check
2742 *
2743 * The device driver is responsible for setting the is_switch bit
2744 * in the ib_device structure at init time.
2745 *
2746 * Return: true if the device is IB switch.
2747 */
2748 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2749 {
2750 return device->is_switch;
2751 }
2752
2753 /**
2754 * rdma_start_port - Return the first valid port number for the device
2755 * specified
2756 *
2757 * @device: Device to be checked
2758 *
2759 * Return start port number
2760 */
2761 static inline u8 rdma_start_port(const struct ib_device *device)
2762 {
2763 return rdma_cap_ib_switch(device) ? 0 : 1;
2764 }
2765
2766 /**
2767 * rdma_end_port - Return the last valid port number for the device
2768 * specified
2769 *
2770 * @device: Device to be checked
2771 *
2772 * Return last port number
2773 */
2774 static inline u8 rdma_end_port(const struct ib_device *device)
2775 {
2776 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2777 }
2778
2779 static inline int rdma_is_port_valid(const struct ib_device *device,
2780 unsigned int port)
2781 {
2782 return (port >= rdma_start_port(device) &&
2783 port <= rdma_end_port(device));
2784 }
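
/*
 * The usual way to visit every port is to iterate from rdma_start_port()
 * to rdma_end_port() inclusive; rdma_is_port_valid() is what to use when
 * the port number comes from an untrusted caller instead. Sketch only,
 * setup_port() is a hypothetical per-port helper:
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		setup_port(device, port);
 *
 *	if (!rdma_is_port_valid(device, user_port))
 *		return -EINVAL;
 */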
2785
2786 static inline bool rdma_is_grh_required(const struct ib_device *device,
2787 u8 port_num)
2788 {
2789 return device->port_immutable[port_num].core_cap_flags &
2790 RDMA_CORE_PORT_IB_GRH_REQUIRED;
2791 }
2792
2793 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2794 {
2795 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2796 }
2797
2798 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2799 {
2800 return device->port_immutable[port_num].core_cap_flags &
2801 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2802 }
2803
2804 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2805 {
2806 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2807 }
2808
2809 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2810 {
2811 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2812 }
2813
2814 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2815 {
2816 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2817 }
2818
2819 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2820 {
2821 return rdma_protocol_ib(device, port_num) ||
2822 rdma_protocol_roce(device, port_num);
2823 }
2824
2825 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2826 {
2827 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2828 }
2829
2830 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2831 {
2832 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2833 }
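
/*
 * These predicates let transport-neutral code branch on a port's protocol
 * without knowing the specific device. A rough sketch; the two callees are
 * hypothetical placeholders, not functions provided by this header:
 *
 *	if (rdma_ib_or_roce(device, port_num))
 *		ret = resolve_route_via_gids(device, port_num);
 *	else if (rdma_protocol_iwarp(device, port_num))
 *		ret = resolve_route_via_iw_cm(device, port_num);
 *	else
 *		ret = -EPROTONOSUPPORT;
 */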
2834
2835 /**
2836 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2837 * Management Datagrams.
2838 * @device: Device to check
2839 * @port_num: Port number to check
2840 *
2841 * Management Datagrams (MAD) are a required part of the InfiniBand
2842 * specification and are supported on all InfiniBand devices. A slightly
2843 * extended version is also supported on OPA interfaces.
2844 *
2845 * Return: true if the port supports sending/receiving of MAD packets.
2846 */
2847 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2848 {
2849 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2850 }
2851
2852 /**
2853 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2854 * Management Datagrams.
2855 * @device: Device to check
2856 * @port_num: Port number to check
2857 *
2858 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2859 * datagrams with their own versions. These OPA MADs share many but not all of
2860 * the characteristics of InfiniBand MADs.
2861 *
2862 * OPA MADs differ in the following ways:
2863 *
2864 * 1) MADs are variable size up to 2K
2865 * IBTA defined MADs remain fixed at 256 bytes
2866 * 2) OPA SMPs must carry valid PKeys
2867 * 3) OPA SMP packets are a different format
2868 *
2869 * Return: true if the port supports OPA MAD packet formats.
2870 */
2871 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2872 {
2873 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2874 == RDMA_CORE_CAP_OPA_MAD;
2875 }
2876
2877 /**
2878 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2879 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2880 * @device: Device to check
2881 * @port_num: Port number to check
2882 *
2883 * Each InfiniBand node is required to provide a Subnet Management Agent
2884 * that the subnet manager can access. Prior to the fabric being fully
2885 * configured by the subnet manager, the SMA is accessed via a well known
2886 * interface called the Subnet Management Interface (SMI). This interface
2887 * uses directed route packets to communicate with the SM to get around the
2888 * chicken and egg problem of the SM needing to know what's on the fabric
2889 * in order to configure the fabric, and needing to configure the fabric in
2890 * order to send packets to the devices on the fabric. These directed
2891 * route packets do not need the fabric fully configured in order to reach
2892 * their destination. The SMI is the only method allowed to send
2893 * directed route packets on an InfiniBand fabric.
2894 *
2895 * Return: true if the port provides an SMI.
2896 */
2897 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2898 {
2899 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2900 }
2901
2902 /**
2903 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2904 * Communication Manager.
2905 * @device: Device to check
2906 * @port_num: Port number to check
2907 *
2908 * The InfiniBand Communication Manager is one of many pre-defined General
2909 * Service Agents (GSA) that are accessed via the General Service
2910 * Interface (GSI). Its role is to facilitate the establishment of connections
2911 * between nodes as well as other management related tasks for established
2912 * connections.
2913 *
2914 * Return: true if the port supports an IB CM (this does not guarantee that
2915 * a CM is actually running however).
2916 */
2917 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2918 {
2919 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2920 }
2921
2922 /**
2923 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2924 * Communication Manager.
2925 * @device: Device to check
2926 * @port_num: Port number to check
2927 *
2928 * Similar to above, but specific to iWARP connections which have a different
2929 * management protocol than InfiniBand.
2930 *
2931 * Return: true if the port supports an iWARP CM (this does not guarantee that
2932 * a CM is actually running however).
2933 */
2934 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2935 {
2936 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2937 }
2938
2939 /**
2940 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2941 * Subnet Administration.
2942 * @device: Device to check
2943 * @port_num: Port number to check
2944 *
2945 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2946 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
2947 * fabrics, devices should resolve routes to other hosts by contacting the
2948 * SA to query the proper route.
2949 *
2950 * Return: true if the port should act as a client to the fabric Subnet
2951 * Administration interface. This does not imply that the SA service is
2952 * running locally.
2953 */
2954 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2955 {
2956 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2957 }
2958
2959 /**
2960 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2961 * Multicast.
2962 * @device: Device to check
2963 * @port_num: Port number to check
2964 *
2965 * InfiniBand multicast registration is more complex than normal IPv4 or
2966 * IPv6 multicast registration. Each Host Channel Adapter must register
2967 * with the Subnet Manager when it wishes to join a multicast group. It
2968 * should do so only once regardless of how many queue pairs it subscribes
2969 * to this group. And it should leave the group only after all queue pairs
2970 * attached to the group have been detached.
2971 *
2972 * Return: true if the port must undertake the additional administrative
2973 * overhead of registering/unregistering with the SM and tracking of the
2974 * total number of queue pairs attached to the multicast group.
2975 */
2976 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2977 {
2978 return rdma_cap_ib_sa(device, port_num);
2979 }
2980
2981 /**
2982 * rdma_cap_af_ib - Check if the port of a device supports
2983 * native InfiniBand (AF_IB) addressing.
2984 * @device: Device to check
2985 * @port_num: Port number to check
2986 *
2987 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2988 * GID. RoCE uses a different mechanism, but still generates a GID via
2989 * a prescribed mechanism and port specific data.
2990 *
2991 * Return: true if the port uses a GID address to identify devices on the
2992 * network.
2993 */
2994 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2995 {
2996 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2997 }
2998
2999 /**
3000 * rdma_cap_eth_ah - Check if the port of device has the capability
3001 * Ethernet Address Handle.
3002 * @device: Device to check
3003 * @port_num: Port number to check
3004 *
3005 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3006 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3007 * port. Normally, packet headers are generated by the sending host
3008 * adapter, but when sending connectionless datagrams, we must manually
3009 * inject the proper headers for the fabric we are communicating over.
3010 *
3011 * Return: true if we are running as a RoCE port and must force the
3012 * addition of a Global Route Header built from our Ethernet Address
3013 * Handle into our header list for connectionless packets.
3014 */
3015 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3016 {
3017 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
3018 }
3019
3020 /**
3021 * rdma_cap_opa_ah - Check if the port of device supports
3022 * OPA Address handles
3023 * @device: Device to check
3024 * @port_num: Port number to check
3025 *
3026 * Return: true if we are running on an OPA device which supports
3027 * the extended OPA addressing.
3028 */
3029 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3030 {
3031 return (device->port_immutable[port_num].core_cap_flags &
3032 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3033 }
3034
3035 /**
3036 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3037 *
3038 * @device: Device
3039 * @port_num: Port number
3040 *
3041 * This MAD size includes the MAD headers and MAD payload. No other headers
3042 * are included.
3043 *
3044 * Return the max MAD size required by the Port. Will return 0 if the port
3045 * does not support MADs
3046 */
3047 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3048 {
3049 return device->port_immutable[port_num].max_mad_size;
3050 }
3051
3052 /**
3053 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3054 * @device: Device to check
3055 * @port_num: Port number to check
3056 *
3057 * RoCE GID table mechanism manages the various GIDs for a device.
3058 *
3059 * NOTE: if allocating the port's GID table has failed, this call will still
3060 * return true, but any RoCE GID table API will fail.
3061 *
3062 * Return: true if the port uses RoCE GID table mechanism in order to manage
3063 * its GIDs.
3064 */
3065 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3066 u8 port_num)
3067 {
3068 return rdma_protocol_roce(device, port_num) &&
3069 device->add_gid && device->del_gid;
3070 }
3071
3072 /*
3073 * Check if the device supports READ W/ INVALIDATE.
3074 */
3075 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3076 {
3077 /*
3078 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3079 * has support for it yet.
3080 */
3081 return rdma_protocol_iwarp(dev, port_num);
3082 }
3083
3084 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3085 int state);
3086 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3087 struct ifla_vf_info *info);
3088 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3089 struct ifla_vf_stats *stats);
3090 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3091 int type);
3092
3093 int ib_query_pkey(struct ib_device *device,
3094 u8 port_num, u16 index, u16 *pkey);
3095
3096 int ib_modify_device(struct ib_device *device,
3097 int device_modify_mask,
3098 struct ib_device_modify *device_modify);
3099
3100 int ib_modify_port(struct ib_device *device,
3101 u8 port_num, int port_modify_mask,
3102 struct ib_port_modify *port_modify);
3103
3104 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3105 u8 *port_num, u16 *index);
3106
3107 int ib_find_pkey(struct ib_device *device,
3108 u8 port_num, u16 pkey, u16 *index);
3109
3110 enum ib_pd_flags {
3111 /*
3112 * Create a memory registration for all memory in the system and place
3113 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3114 * ULPs to avoid the overhead of dynamic MRs.
3115 *
3116 * This flag is generally considered unsafe and must only be used in
3117 * extremely trusted environments. Every use of it will log a warning
3118 * in the kernel log.
3119 */
3120 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3121 };
3122
3123 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3124 const char *caller);
3125 #define ib_alloc_pd(device, flags) \
3126 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3127 void ib_dealloc_pd(struct ib_pd *pd);
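
/*
 * Typical ULP usage, sketched: allocate one PD per device at client add
 * time and create the QPs and MRs under it. Passing flags of 0 avoids the
 * unsafe global rkey described above.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 *	(create QPs, MRs, ... under this PD, then eventually)
 *
 *	ib_dealloc_pd(pd);
 */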
3128
3129 /**
3130 * rdma_create_ah - Creates an address handle for the given address vector.
3131 * @pd: The protection domain associated with the address handle.
3132 * @ah_attr: The attributes of the address vector.
3133 *
3134 * The address handle is used to reference a local or global destination
3135 * in all UD QP post sends.
3136 */
3137 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
3138
3139 /**
3140 * rdma_create_user_ah - Creates an address handle for the given address vector.
3141 * It resolves the destination MAC address for an AH attribute of RoCE type.
3142 * @pd: The protection domain associated with the address handle.
3143 * @ah_attr: The attributes of the address vector.
3144 * @udata: pointer to the user's input/output buffer information needed
3145 * by the provider driver.
3146 *
3147 * It returns a valid address handle on success and an ERR_PTR on failure.
3148 * The address handle is used to reference a local or global destination
3149 * in all UD QP post sends.
3150 */
3151 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3152 struct rdma_ah_attr *ah_attr,
3153 struct ib_udata *udata);
3154 /**
3155 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3156 * work completion.
3157 * @hdr: the L3 header to parse
3158 * @net_type: type of header to parse
3159 * @sgid: place to store source gid
3160 * @dgid: place to store destination gid
3161 */
3162 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3163 enum rdma_network_type net_type,
3164 union ib_gid *sgid, union ib_gid *dgid);
3165
3166 /**
3167 * ib_get_rdma_header_version - Get the header version
3168 * @hdr: the L3 header to parse
3169 */
3170 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3171
3172 /**
3173 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3174 * work completion.
3175 * @device: Device on which the received message arrived.
3176 * @port_num: Port on which the received message arrived.
3177 * @wc: Work completion associated with the received message.
3178 * @grh: References the received global route header. This parameter is
3179 * ignored unless the work completion indicates that the GRH is valid.
3180 * @ah_attr: Returned attributes that can be used when creating an address
3181 * handle for replying to the message.
3182 * When ib_init_ah_attr_from_wc() returns success,
3183 * (a) for IB link layer it optionally contains a reference to SGID attribute
3184 * when GRH is present for IB link layer.
3185 * (b) for RoCE link layer it contains a reference to SGID attribute.
3186 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3187 * attributes which are initialized using ib_init_ah_attr_from_wc().
3188 *
3189 */
3190 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3191 const struct ib_wc *wc, const struct ib_grh *grh,
3192 struct rdma_ah_attr *ah_attr);
3193
3194 /**
3195 * ib_create_ah_from_wc - Creates an address handle associated with the
3196 * sender of the specified work completion.
3197 * @pd: The protection domain associated with the address handle.
3198 * @wc: Work completion information associated with a received message.
3199 * @grh: References the received global route header. This parameter is
3200 * ignored unless the work completion indicates that the GRH is valid.
3201 * @port_num: The outbound port number to associate with the address.
3202 *
3203 * The address handle is used to reference a local or global destination
3204 * in all UD QP post sends.
3205 */
3206 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3207 const struct ib_grh *grh, u8 port_num);
3208
3209 /**
3210 * rdma_modify_ah - Modifies the address vector associated with an address
3211 * handle.
3212 * @ah: The address handle to modify.
3213 * @ah_attr: The new address vector attributes to associate with the
3214 * address handle.
3215 */
3216 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3217
3218 /**
3219 * rdma_query_ah - Queries the address vector associated with an address
3220 * handle.
3221 * @ah: The address handle to query.
3222 * @ah_attr: The address vector attributes associated with the address
3223 * handle.
3224 */
3225 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3226
3227 /**
3228 * rdma_destroy_ah - Destroys an address handle.
3229 * @ah: The address handle to destroy.
3230 */
3231 int rdma_destroy_ah(struct ib_ah *ah);
3232
3233 /**
3234 * ib_create_srq - Creates a SRQ associated with the specified protection
3235 * domain.
3236 * @pd: The protection domain associated with the SRQ.
3237 * @srq_init_attr: A list of initial attributes required to create the
3238 * SRQ. If SRQ creation succeeds, then the attributes are updated to
3239 * the actual capabilities of the created SRQ.
3240 *
3241 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3242 * requested size of the SRQ, and set to the actual values allocated
3243 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
3244 * will always be at least as large as the requested values.
3245 */
3246 struct ib_srq *ib_create_srq(struct ib_pd *pd,
3247 struct ib_srq_init_attr *srq_init_attr);
3248
3249 /**
3250 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3251 * @srq: The SRQ to modify.
3252 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3253 * the current values of selected SRQ attributes are returned.
3254 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3255 * are being modified.
3256 *
3257 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3258 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3259 * the number of receives queued drops below the limit.
3260 */
3261 int ib_modify_srq(struct ib_srq *srq,
3262 struct ib_srq_attr *srq_attr,
3263 enum ib_srq_attr_mask srq_attr_mask);
3264
3265 /**
3266 * ib_query_srq - Returns the attribute list and current values for the
3267 * specified SRQ.
3268 * @srq: The SRQ to query.
3269 * @srq_attr: The attributes of the specified SRQ.
3270 */
3271 int ib_query_srq(struct ib_srq *srq,
3272 struct ib_srq_attr *srq_attr);
3273
3274 /**
3275 * ib_destroy_srq - Destroys the specified SRQ.
3276 * @srq: The SRQ to destroy.
3277 */
3278 int ib_destroy_srq(struct ib_srq *srq);
3279
3280 /**
3281 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3282 * @srq: The SRQ to post the work request on.
3283 * @recv_wr: A list of work requests to post on the receive queue.
3284 * @bad_recv_wr: On an immediate failure, this parameter will reference
3285 * the work request that failed to be posted on the QP.
3286 */
3287 static inline int ib_post_srq_recv(struct ib_srq *srq,
3288 const struct ib_recv_wr *recv_wr,
3289 const struct ib_recv_wr **bad_recv_wr)
3290 {
3291 const struct ib_recv_wr *dummy;
3292
3293 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
3294 }
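
/*
 * Sketch of posting a single receive buffer to an SRQ; dma_addr, buf_len
 * and ctx are assumed to come from the caller, and passing NULL for
 * @bad_recv_wr is fine here because the wrapper substitutes a dummy
 * pointer.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= buf_len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id	 = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	ret = ib_post_srq_recv(srq, &wr, NULL);
 */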
3295
3296 /**
3297 * ib_create_qp - Creates a QP associated with the specified protection
3298 * domain.
3299 * @pd: The protection domain associated with the QP.
3300 * @qp_init_attr: A list of initial attributes required to create the
3301 * QP. If QP creation succeeds, then the attributes are updated to
3302 * the actual capabilities of the created QP.
3303 */
3304 struct ib_qp *ib_create_qp(struct ib_pd *pd,
3305 struct ib_qp_init_attr *qp_init_attr);
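
/*
 * Sketch of a minimal RC QP creation; the PD, CQs and event handler are
 * assumed to exist already, and the capacities are arbitrary example
 * values, not recommendations.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event_handler,
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */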
3306
3307 /**
3308 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3309 * @qp: The QP to modify.
3310 * @attr: On input, specifies the QP attributes to modify. On output,
3311 * the current values of selected QP attributes are returned.
3312 * @attr_mask: A bit-mask used to specify which attributes of the QP
3313 * are being modified.
3314 * @udata: pointer to the user's input/output buffer information
3315 * needed by the provider driver.
3316 * It returns 0 on success and returns appropriate error code on error.
3317 */
3318 int ib_modify_qp_with_udata(struct ib_qp *qp,
3319 struct ib_qp_attr *attr,
3320 int attr_mask,
3321 struct ib_udata *udata);
3322
3323 /**
3324 * ib_modify_qp - Modifies the attributes for the specified QP and then
3325 * transitions the QP to the given state.
3326 * @qp: The QP to modify.
3327 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3328 * the current values of selected QP attributes are returned.
3329 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3330 * are being modified.
3331 */
3332 int ib_modify_qp(struct ib_qp *qp,
3333 struct ib_qp_attr *qp_attr,
3334 int qp_attr_mask);
3335
3336 /**
3337 * ib_query_qp - Returns the attribute list and current values for the
3338 * specified QP.
3339 * @qp: The QP to query.
3340 * @qp_attr: The attributes of the specified QP.
3341 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3342 * @qp_init_attr: Additional attributes of the selected QP.
3343 *
3344 * The qp_attr_mask may be used to limit the query to gathering only the
3345 * selected attributes.
3346 */
3347 int ib_query_qp(struct ib_qp *qp,
3348 struct ib_qp_attr *qp_attr,
3349 int qp_attr_mask,
3350 struct ib_qp_init_attr *qp_init_attr);
3351
3352 /**
3353 * ib_destroy_qp - Destroys the specified QP.
3354 * @qp: The QP to destroy.
3355 */
3356 int ib_destroy_qp(struct ib_qp *qp);
3357
3358 /**
3359 * ib_open_qp - Obtain a reference to an existing sharable QP.
3360 * @xrcd - XRC domain
3361 * @qp_open_attr: Attributes identifying the QP to open.
3362 *
3363 * Returns a reference to a sharable QP.
3364 */
3365 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3366 struct ib_qp_open_attr *qp_open_attr);
3367
3368 /**
3369 * ib_close_qp - Release an external reference to a QP.
3370 * @qp: The QP handle to release
3371 *
3372 * The opened QP handle is released by the caller. The underlying
3373 * shared QP is not destroyed until all internal references are released.
3374 */
3375 int ib_close_qp(struct ib_qp *qp);
3376
3377 /**
3378 * ib_post_send - Posts a list of work requests to the send queue of
3379 * the specified QP.
3380 * @qp: The QP to post the work request on.
3381 * @send_wr: A list of work requests to post on the send queue.
3382 * @bad_send_wr: On an immediate failure, this parameter will reference
3383 * the work request that failed to be posted on the QP.
3384 *
3385 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3386 * error is returned, the QP state shall not be affected,
3387 * ib_post_send() will return an immediate error after queueing any
3388 * earlier work requests in the list.
3389 */
3390 static inline int ib_post_send(struct ib_qp *qp,
3391 const struct ib_send_wr *send_wr,
3392 const struct ib_send_wr **bad_send_wr)
3393 {
3394 const struct ib_send_wr *dummy;
3395
3396 return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
3397 }
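
/*
 * Sketch of posting one signaled SEND; dma_addr, len and ctx are assumed
 * to be set up by the caller, and the buffer must stay mapped until the
 * corresponding completion is reaped.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id	    = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */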
3398
3399 /**
3400 * ib_post_recv - Posts a list of work requests to the receive queue of
3401 * the specified QP.
3402 * @qp: The QP to post the work request on.
3403 * @recv_wr: A list of work requests to post on the receive queue.
3404 * @bad_recv_wr: On an immediate failure, this parameter will reference
3405 * the work request that failed to be posted on the QP.
3406 */
3407 static inline int ib_post_recv(struct ib_qp *qp,
3408 const struct ib_recv_wr *recv_wr,
3409 const struct ib_recv_wr **bad_recv_wr)
3410 {
3411 const struct ib_recv_wr *dummy;
3412
3413 return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3414 }
3415
3416 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3417 int nr_cqe, int comp_vector,
3418 enum ib_poll_context poll_ctx, const char *caller);
3419 #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3420 __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3421
3422 void ib_free_cq(struct ib_cq *cq);
3423 int ib_process_cq_direct(struct ib_cq *cq, int budget);
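
/*
 * Sketch of the ib_alloc_cq() path: the CQ is polled on the caller's
 * behalf (here from softirq context) and each work request is expected to
 * point ->wr_cqe at a struct ib_cqe whose ->done callback handles the
 * completion. The nr_cqe of 128 and comp_vector 0 are arbitrary examples.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, ctx, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 *	(post work requests whose wr_cqe->done handlers run as the CQ is
 *	 drained, then eventually)
 *
 *	ib_free_cq(cq);
 */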
3424
3425 /**
3426 * ib_create_cq - Creates a CQ on the specified device.
3427 * @device: The device on which to create the CQ.
3428 * @comp_handler: A user-specified callback that is invoked when a
3429 * completion event occurs on the CQ.
3430 * @event_handler: A user-specified callback that is invoked when an
3431 * asynchronous event not associated with a completion occurs on the CQ.
3432 * @cq_context: Context associated with the CQ returned to the user via
3433 * the associated completion and event handlers.
3434 * @cq_attr: The attributes the CQ should be created upon.
3435 *
3436 * Users can examine the cq structure to determine the actual CQ size.
3437 */
3438 struct ib_cq *__ib_create_cq(struct ib_device *device,
3439 ib_comp_handler comp_handler,
3440 void (*event_handler)(struct ib_event *, void *),
3441 void *cq_context,
3442 const struct ib_cq_init_attr *cq_attr,
3443 const char *caller);
3444 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3445 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3446
3447 /**
3448 * ib_resize_cq - Modifies the capacity of the CQ.
3449 * @cq: The CQ to resize.
3450 * @cqe: The minimum size of the CQ.
3451 *
3452 * Users can examine the cq structure to determine the actual CQ size.
3453 */
3454 int ib_resize_cq(struct ib_cq *cq, int cqe);
3455
3456 /**
3457 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3458 * @cq: The CQ to modify.
3459 * @cq_count: number of CQEs that will trigger an event
3460 * @cq_period: max period of time in usec before triggering an event
3461 *
3462 */
3463 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3464
3465 /**
3466 * ib_destroy_cq - Destroys the specified CQ.
3467 * @cq: The CQ to destroy.
3468 */
3469 int ib_destroy_cq(struct ib_cq *cq);
3470
3471 /**
3472 * ib_poll_cq - poll a CQ for completion(s)
3473 * @cq:the CQ being polled
3474 * @num_entries:maximum number of completions to return
3475 * @wc:array of at least @num_entries &struct ib_wc where completions
3476 * will be returned
3477 *
3478 * Poll a CQ for (possibly multiple) completions. If the return value
3479 * is < 0, an error occurred. If the return value is >= 0, it is the
3480 * number of completions returned. If the return value is
3481 * non-negative and < num_entries, then the CQ was emptied.
3482 */
3483 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3484 struct ib_wc *wc)
3485 {
3486 return cq->device->poll_cq(cq, num_entries, wc);
3487 }
3488
3489 /**
3490 * ib_req_notify_cq - Request completion notification on a CQ.
3491 * @cq: The CQ to generate an event for.
3492 * @flags:
3493 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3494 * to request an event on the next solicited event or next work
3495 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3496 * may also be |ed in to request a hint about missed events, as
3497 * described below.
3498 *
3499 * Return Value:
3500 * < 0 means an error occurred while requesting notification
3501 * == 0 means notification was requested successfully, and if
3502 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3503 * were missed and it is safe to wait for another event. In
3504 * this case it is guaranteed that any work completions added
3505 * to the CQ since the last CQ poll will trigger a completion
3506 * notification event.
3507 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3508 * in. It means that the consumer must poll the CQ again to
3509 * make sure it is empty to avoid missing an event because of a
3510 * race between requesting notification and an entry being
3511 * added to the CQ. This return value means it is possible
3512 * (but not guaranteed) that a work completion has been added
3513 * to the CQ since the last poll without triggering a
3514 * completion notification event.
3515 */
3516 static inline int ib_req_notify_cq(struct ib_cq *cq,
3517 enum ib_cq_notify_flags flags)
3518 {
3519 return cq->device->req_notify_cq(cq, flags);
3520 }
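
/*
 * The return-value protocol above leads to the following drain-and-rearm
 * idiom for event driven consumers (handle_completion() is a hypothetical
 * per-completion handler): keep polling until the CQ is empty, request a
 * notification, and poll again whenever a missed event is reported.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */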
3521
3522 /**
3523 * ib_req_ncomp_notif - Request completion notification when there are
3524 * at least the specified number of unreaped completions on the CQ.
3525 * @cq: The CQ to generate an event for.
3526 * @wc_cnt: The number of unreaped completions that should be on the
3527 * CQ before an event is generated.
3528 */
3529 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3530 {
3531 return cq->device->req_ncomp_notif ?
3532 cq->device->req_ncomp_notif(cq, wc_cnt) :
3533 -ENOSYS;
3534 }
3535
3536 /**
3537 * ib_dma_mapping_error - check a DMA addr for error
3538 * @dev: The device for which the dma_addr was created
3539 * @dma_addr: The DMA address to check
3540 */
3541 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3542 {
3543 return dma_mapping_error(dev->dma_device, dma_addr);
3544 }
3545
3546 /**
3547 * ib_dma_map_single - Map a kernel virtual address to DMA address
3548 * @dev: The device for which the dma_addr is to be created
3549 * @cpu_addr: The kernel virtual address
3550 * @size: The size of the region in bytes
3551 * @direction: The direction of the DMA
3552 */
3553 static inline u64 ib_dma_map_single(struct ib_device *dev,
3554 void *cpu_addr, size_t size,
3555 enum dma_data_direction direction)
3556 {
3557 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3558 }
3559
3560 /**
3561 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3562 * @dev: The device for which the DMA address was created
3563 * @addr: The DMA address
3564 * @size: The size of the region in bytes
3565 * @direction: The direction of the DMA
3566 */
3567 static inline void ib_dma_unmap_single(struct ib_device *dev,
3568 u64 addr, size_t size,
3569 enum dma_data_direction direction)
3570 {
3571 dma_unmap_single(dev->dma_device, addr, size, direction);
3572 }
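
/*
 * Editor's note: an illustrative map / error-check / unmap sequence for a
 * single kernel buffer (not part of the original header). "dev", "buf" and
 * "len" are assumed to be provided by the caller.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... reference dma_addr in an ib_sge / work request ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */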
3573
3574 /**
3575 * ib_dma_map_page - Map a physical page to DMA address
3576 * @dev: The device for which the dma_addr is to be created
3577 * @page: The page to be mapped
3578 * @offset: The offset within the page
3579 * @size: The size of the region in bytes
3580 * @direction: The direction of the DMA
3581 */
3582 static inline u64 ib_dma_map_page(struct ib_device *dev,
3583 struct page *page,
3584 unsigned long offset,
3585 size_t size,
3586 enum dma_data_direction direction)
3587 {
3588 return dma_map_page(dev->dma_device, page, offset, size, direction);
3589 }
3590
3591 /**
3592 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3593 * @dev: The device for which the DMA address was created
3594 * @addr: The DMA address
3595 * @size: The size of the region in bytes
3596 * @direction: The direction of the DMA
3597 */
3598 static inline void ib_dma_unmap_page(struct ib_device *dev,
3599 u64 addr, size_t size,
3600 enum dma_data_direction direction)
3601 {
3602 dma_unmap_page(dev->dma_device, addr, size, direction);
3603 }
3604
3605 /**
3606 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3607 * @dev: The device for which the DMA addresses are to be created
3608 * @sg: The array of scatter/gather entries
3609 * @nents: The number of scatter/gather entries
3610 * @direction: The direction of the DMA
3611 */
3612 static inline int ib_dma_map_sg(struct ib_device *dev,
3613 struct scatterlist *sg, int nents,
3614 enum dma_data_direction direction)
3615 {
3616 return dma_map_sg(dev->dma_device, sg, nents, direction);
3617 }
3618
3619 /**
3620 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3621 * @dev: The device for which the DMA addresses were created
3622 * @sg: The array of scatter/gather entries
3623 * @nents: The number of scatter/gather entries
3624 * @direction: The direction of the DMA
3625 */
3626 static inline void ib_dma_unmap_sg(struct ib_device *dev,
3627 struct scatterlist *sg, int nents,
3628 enum dma_data_direction direction)
3629 {
3630 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3631 }
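
/*
 * Editor's note: an illustrative scatterlist mapping sketch (not part of the
 * original header). "dev", "sgl" and "nents" are assumed, and setup_sge() is
 * a hypothetical helper; the mapped count may be smaller than nents, while
 * the unmap must still use the original nents.
 *
 *	struct scatterlist *sg;
 *	int i, mapped;
 *
 *	mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		setup_sge(sg_dma_address(sg), sg_dma_len(sg));
 *	ib_dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 */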
3632
3633 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3634 struct scatterlist *sg, int nents,
3635 enum dma_data_direction direction,
3636 unsigned long dma_attrs)
3637 {
3638 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3639 dma_attrs);
3640 }
3641
3642 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3643 struct scatterlist *sg, int nents,
3644 enum dma_data_direction direction,
3645 unsigned long dma_attrs)
3646 {
3647 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3648 }
3649 /**
3650 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3651 * @dev: The device for which the DMA addresses were created
3652 * @sg: The scatter/gather entry
3653 *
3654 * Note: this function is obsolete. To do: change all occurrences of
3655 * ib_sg_dma_address() into sg_dma_address().
3656 */
3657 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3658 struct scatterlist *sg)
3659 {
3660 return sg_dma_address(sg);
3661 }
3662
3663 /**
3664 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3665 * @dev: The device for which the DMA addresses were created
3666 * @sg: The scatter/gather entry
3667 *
3668 * Note: this function is obsolete. To do: change all occurrences of
3669 * ib_sg_dma_len() into sg_dma_len().
3670 */
3671 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3672 struct scatterlist *sg)
3673 {
3674 return sg_dma_len(sg);
3675 }
3676
3677 /**
3678 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3679 * @dev: The device for which the DMA address was created
3680 * @addr: The DMA address
3681 * @size: The size of the region in bytes
3682 * @dir: The direction of the DMA
3683 */
3684 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3685 u64 addr,
3686 size_t size,
3687 enum dma_data_direction dir)
3688 {
3689 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3690 }
3691
3692 /**
3693 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3694 * @dev: The device for which the DMA address was created
3695 * @addr: The DMA address
3696 * @size: The size of the region in bytes
3697 * @dir: The direction of the DMA
3698 */
3699 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3700 u64 addr,
3701 size_t size,
3702 enum dma_data_direction dir)
3703 {
3704 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3705 }
3706
3707 /**
3708 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3709 * @dev: The device for which the DMA address is requested
3710 * @size: The size of the region to allocate in bytes
3711 * @dma_handle: A pointer for returning the DMA address of the region
3712 * @flag: memory allocator flags
3713 */
3714 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3715 size_t size,
3716 dma_addr_t *dma_handle,
3717 gfp_t flag)
3718 {
3719 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3720 }
3721
3722 /**
3723 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3724 * @dev: The device for which the DMA addresses were allocated
3725 * @size: The size of the region
3726 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3727 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3728 */
3729 static inline void ib_dma_free_coherent(struct ib_device *dev,
3730 size_t size, void *cpu_addr,
3731 dma_addr_t dma_handle)
3732 {
3733 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3734 }
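
/*
 * Editor's note: an illustrative pairing of ib_dma_alloc_coherent() and
 * ib_dma_free_coherent() (not part of the original header). "dev" and
 * "size" are assumed.
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr;
 *
 *	vaddr = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... CPU uses vaddr, the HCA is given dma_handle ...
 *	ib_dma_free_coherent(dev, size, vaddr, dma_handle);
 */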
3735
3736 /**
3737 * ib_dereg_mr - Deregisters a memory region and removes it from the
3738 * HCA translation table.
3739 * @mr: The memory region to deregister.
3740 *
3741 * This function can fail if the memory region has memory windows bound to it.
3742 */
3743 int ib_dereg_mr(struct ib_mr *mr);
3744
3745 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3746 enum ib_mr_type mr_type,
3747 u32 max_num_sg);
3748
3749 /**
3750 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3751 * R_Key and L_Key.
3752 * @mr - struct ib_mr pointer to be updated.
3753 * @newkey - new key to be used.
3754 */
3755 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3756 {
3757 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3758 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3759 }
3760
3761 /**
3762 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3763 * for calculating a new rkey for type 2 memory windows.
3764 * @rkey - the rkey to increment.
3765 */
3766 static inline u32 ib_inc_rkey(u32 rkey)
3767 {
3768 const u32 mask = 0x000000ff;
3769 return ((rkey + 1) & mask) | (rkey & ~mask);
3770 }
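
/*
 * Editor's note: an illustrative key-rolling sketch (not part of the original
 * header). Before reusing a fast-reg MR, consumers commonly advance the key
 * portion so stale remote references cannot match the new registration;
 * "mr" is assumed.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */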
3771
3772 /**
3773 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3774 * @pd: The protection domain associated with the unmapped region.
3775 * @mr_access_flags: Specifies the memory access rights.
3776 * @fmr_attr: Attributes of the unmapped region.
3777 *
3778 * A fast memory region must be mapped before it can be used as part of
3779 * a work request.
3780 */
3781 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3782 int mr_access_flags,
3783 struct ib_fmr_attr *fmr_attr);
3784
3785 /**
3786 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3787 * @fmr: The fast memory region to associate with the pages.
3788 * @page_list: An array of physical pages to map to the fast memory region.
3789 * @list_len: The number of pages in page_list.
3790 * @iova: The I/O virtual address to use with the mapped region.
3791 */
3792 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3793 u64 *page_list, int list_len,
3794 u64 iova)
3795 {
3796 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3797 }
3798
3799 /**
3800 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3801 * @fmr_list: A linked list of fast memory regions to unmap.
3802 */
3803 int ib_unmap_fmr(struct list_head *fmr_list);
3804
3805 /**
3806 * ib_dealloc_fmr - Deallocates a fast memory region.
3807 * @fmr: The fast memory region to deallocate.
3808 */
3809 int ib_dealloc_fmr(struct ib_fmr *fmr);
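
/*
 * Editor's note: an illustrative sketch of the legacy FMR life cycle (not
 * part of the original header). "pd", "pages", "npages" and "iova" are
 * assumed.
 *
 *	struct ib_fmr_attr attr = { .max_pages = npages, .max_maps = 32,
 *				    .page_shift = PAGE_SHIFT };
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	ib_map_phys_fmr(fmr, pages, npages, iova);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */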
3810
3811 /**
3812 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3813 * @qp: QP to attach to the multicast group. The QP must be type
3814 * IB_QPT_UD.
3815 * @gid: Multicast group GID.
3816 * @lid: Multicast group LID in host byte order.
3817 *
3818 * In order to send and receive multicast packets, subnet
3819 * administration must have created the multicast group and configured
3820 * the fabric appropriately. The port associated with the specified
3821 * QP must also be a member of the multicast group.
3822 */
3823 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3824
3825 /**
3826 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3827 * @qp: QP to detach from the multicast group.
3828 * @gid: Multicast group GID.
3829 * @lid: Multicast group LID in host byte order.
3830 */
3831 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3832
3833 /**
3834 * ib_alloc_xrcd - Allocates an XRC domain.
3835 * @device: The device on which to allocate the XRC domain.
3836 * @caller: Module name for kernel consumers
3837 */
3838 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3839 #define ib_alloc_xrcd(device) \
3840 __ib_alloc_xrcd((device), KBUILD_MODNAME)
3841
3842 /**
3843 * ib_dealloc_xrcd - Deallocates an XRC domain.
3844 * @xrcd: The XRC domain to deallocate.
3845 */
3846 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3847
3848 static inline int ib_check_mr_access(int flags)
3849 {
3850 /*
3851 * Local write permission is required if remote write or
3852 * remote atomic permission is also requested.
3853 */
3854 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3855 !(flags & IB_ACCESS_LOCAL_WRITE))
3856 return -EINVAL;
3857
3858 return 0;
3859 }
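
/*
 * Editor's note (illustrative, not part of the original header): requesting
 * remote write or remote atomic access without local write is rejected.
 *
 *	ib_check_mr_access(IB_ACCESS_REMOTE_WRITE);			-> -EINVAL
 *	ib_check_mr_access(IB_ACCESS_LOCAL_WRITE |
 *			   IB_ACCESS_REMOTE_WRITE);			-> 0
 */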
3860
3861 static inline bool ib_access_writable(int access_flags)
3862 {
3863 /*
3864 * We have writable memory backing the MR if any of the following
3865 * access flags are set. "Local write" and "remote write" obviously
3866 * require write access. "Remote atomic" can do things like fetch and
3867 * add, which will modify memory, and "MW bind" can change permissions
3868 * by binding a window.
3869 */
3870 return access_flags &
3871 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
3872 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3873 }
3874
3875 /**
3876 * ib_check_mr_status: lightweight check of MR status.
3877 * This routine may provide status checks on a selected
3878 * ib_mr. The first use is for signature status checks.
3879 *
3880 * @mr: A memory region.
3881 * @check_mask: Bitmask of which checks to perform from
3882 * ib_mr_status_check enumeration.
3883 * @mr_status: The container of relevant status checks.
3884 * Failed checks will be indicated in the status bitmask,
3885 * and the relevant info will be in the error item.
3886 */
3887 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3888 struct ib_mr_status *mr_status);
3889
3890 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3891 u16 pkey, const union ib_gid *gid,
3892 const struct sockaddr *addr);
3893 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3894 struct ib_wq_init_attr *init_attr);
3895 int ib_destroy_wq(struct ib_wq *wq);
3896 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3897 u32 wq_attr_mask);
3898 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3899 struct ib_rwq_ind_table_init_attr*
3900 wq_ind_table_init_attr);
3901 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3902
3903 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3904 unsigned int *sg_offset, unsigned int page_size);
3905
3906 static inline int
3907 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3908 unsigned int *sg_offset, unsigned int page_size)
3909 {
3910 int n;
3911
3912 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3913 mr->iova = 0;
3914
3915 return n;
3916 }
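
/*
 * Editor's note: an illustrative fast-registration sketch (not part of the
 * original header). "mr", "sgl" and "nents" are assumed; the REG work
 * request is chained and posted by the caller via ib_post_send().
 *
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 */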
3917
3918 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3919 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3920
3921 void ib_drain_rq(struct ib_qp *qp);
3922 void ib_drain_sq(struct ib_qp *qp);
3923 void ib_drain_qp(struct ib_qp *qp);
3924
3925 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3926
3927 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3928 {
3929 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3930 return attr->roce.dmac;
3931 return NULL;
3932 }
3933
3934 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3935 {
3936 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3937 attr->ib.dlid = (u16)dlid;
3938 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3939 attr->opa.dlid = dlid;
3940 }
3941
3942 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3943 {
3944 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3945 return attr->ib.dlid;
3946 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3947 return attr->opa.dlid;
3948 return 0;
3949 }
3950
3951 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3952 {
3953 attr->sl = sl;
3954 }
3955
3956 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3957 {
3958 return attr->sl;
3959 }
3960
3961 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3962 u8 src_path_bits)
3963 {
3964 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3965 attr->ib.src_path_bits = src_path_bits;
3966 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3967 attr->opa.src_path_bits = src_path_bits;
3968 }
3969
3970 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3971 {
3972 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3973 return attr->ib.src_path_bits;
3974 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3975 return attr->opa.src_path_bits;
3976 return 0;
3977 }
3978
3979 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3980 bool make_grd)
3981 {
3982 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3983 attr->opa.make_grd = make_grd;
3984 }
3985
3986 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3987 {
3988 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3989 return attr->opa.make_grd;
3990 return false;
3991 }
3992
3993 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3994 {
3995 attr->port_num = port_num;
3996 }
3997
3998 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3999 {
4000 return attr->port_num;
4001 }
4002
4003 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4004 u8 static_rate)
4005 {
4006 attr->static_rate = static_rate;
4007 }
4008
4009 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4010 {
4011 return attr->static_rate;
4012 }
4013
4014 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4015 enum ib_ah_flags flag)
4016 {
4017 attr->ah_flags = flag;
4018 }
4019
4020 static inline enum ib_ah_flags
4021 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4022 {
4023 return attr->ah_flags;
4024 }
4025
4026 static inline const struct ib_global_route
4027 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4028 {
4029 return &attr->grh;
4030 }
4031
4032 /* To retrieve and modify the GRH */
4033 static inline struct ib_global_route
4034 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4035 {
4036 return &attr->grh;
4037 }
4038
4039 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4040 {
4041 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4042
4043 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4044 }
4045
4046 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4047 __be64 prefix)
4048 {
4049 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4050
4051 grh->dgid.global.subnet_prefix = prefix;
4052 }
4053
4054 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4055 __be64 if_id)
4056 {
4057 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4058
4059 grh->dgid.global.interface_id = if_id;
4060 }
4061
4062 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4063 union ib_gid *dgid, u32 flow_label,
4064 u8 sgid_index, u8 hop_limit,
4065 u8 traffic_class)
4066 {
4067 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4068
4069 attr->ah_flags = IB_AH_GRH;
4070 if (dgid)
4071 grh->dgid = *dgid;
4072 grh->flow_label = flow_label;
4073 grh->sgid_index = sgid_index;
4074 grh->hop_limit = hop_limit;
4075 grh->traffic_class = traffic_class;
4076 grh->sgid_attr = NULL;
4077 }
4078
4079 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4080 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4081 u32 flow_label, u8 hop_limit, u8 traffic_class,
4082 const struct ib_gid_attr *sgid_attr);
4083 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4084 const struct rdma_ah_attr *src);
4085 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4086 const struct rdma_ah_attr *new);
4087 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4088
4089 /**
4090 * rdma_ah_find_type - Return address handle type.
4091 *
4092 * @dev: Device to be checked
4093 * @port_num: Port number
4094 */
4095 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4096 u8 port_num)
4097 {
4098 if (rdma_protocol_roce(dev, port_num))
4099 return RDMA_AH_ATTR_TYPE_ROCE;
4100 if (rdma_protocol_ib(dev, port_num)) {
4101 if (rdma_cap_opa_ah(dev, port_num))
4102 return RDMA_AH_ATTR_TYPE_OPA;
4103 return RDMA_AH_ATTR_TYPE_IB;
4104 }
4105
4106 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4107 }
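
/*
 * Editor's note: an illustrative address-handle attribute setup (not part of
 * the original header). "dev", "port_num", "dgid" and "sgid_index" are
 * assumed; hop limit and traffic class are arbitrary example values.
 *
 *	struct rdma_ah_attr ah_attr = {};
 *
 *	ah_attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 */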
4108
4109 /**
4110 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4111 * In the current implementation the only way to
4112 * get the 32bit lid is from other sources for OPA.
4113 * For IB, lids will always be 16bits so cast the
4114 * value accordingly.
4115 *
4116 * @lid: A 32bit LID
4117 */
4118 static inline u16 ib_lid_cpu16(u32 lid)
4119 {
4120 WARN_ON_ONCE(lid & 0xFFFF0000);
4121 return (u16)lid;
4122 }
4123
4124 /**
4125 * ib_lid_be16 - Return lid in 16bit BE encoding.
4126 *
4127 * @lid: A 32bit LID
4128 */
4129 static inline __be16 ib_lid_be16(u32 lid)
4130 {
4131 WARN_ON_ONCE(lid & 0xFFFF0000);
4132 return cpu_to_be16((u16)lid);
4133 }
4134
4135 /**
4136 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4137 * vector
4138 * @device: the rdma device
4139 * @comp_vector: index of completion vector
4140 *
4141 * Returns NULL on failure or if the device driver does not implement
4142 * get_vector_affinity, otherwise a corresponding cpu map of the
4143 * completion vector.
4144 */
4145 static inline const struct cpumask *
4146 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4147 {
4148 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4149 !device->get_vector_affinity)
4150 return NULL;
4151
4152 return device->get_vector_affinity(device, comp_vector);
4153
4154 }
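
/*
 * Editor's note: an illustrative sketch (not part of the original header)
 * of a ULP picking a completion vector whose affinity covers a given CPU;
 * "device" and "cpu" are assumed.
 *
 *	const struct cpumask *mask;
 *	int vec;
 *
 *	for (vec = 0; vec < device->num_comp_vectors; vec++) {
 *		mask = ib_get_vector_affinity(device, vec);
 *		if (mask && cpumask_test_cpu(cpu, mask))
 *			break;
 *	}
 */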
4155
4156 static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
4157 struct ib_qp *qp, struct ib_device *device)
4158 {
4159 uobj->object = ibflow;
4160 ibflow->uobject = uobj;
4161
4162 if (qp) {
4163 atomic_inc(&qp->usecnt);
4164 ibflow->qp = qp;
4165 }
4166
4167 ibflow->device = device;
4168 }
4169
4170 /**
4171 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4172 * and add their gids, as needed, to the relevant RoCE devices.
4173 *
4174 * @ibdev: the rdma device
4175 */
4176 void rdma_roce_rescan_device(struct ib_device *ibdev);
4177
4178 struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
4179
4180 int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
4181 struct uverbs_attr_bundle *attrs);
4182 #endif /* IB_VERBS_H */
4183