1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38 
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41 
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/mm.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/list.h>
48 #include <linux/rwsem.h>
49 #include <linux/scatterlist.h>
50 #include <linux/workqueue.h>
51 #include <linux/socket.h>
52 #include <linux/irq_poll.h>
53 #include <uapi/linux/if_ether.h>
54 #include <net/ipv6.h>
55 #include <net/ip.h>
56 #include <linux/string.h>
57 #include <linux/slab.h>
58 #include <linux/netdevice.h>
59 
60 #include <linux/if_link.h>
61 #include <linux/atomic.h>
62 #include <linux/mmu_notifier.h>
63 #include <linux/uaccess.h>
64 #include <linux/cgroup_rdma.h>
65 #include <uapi/rdma/ib_user_verbs.h>
66 #include <rdma/restrack.h>
67 #include <uapi/rdma/rdma_user_ioctl.h>
68 #include <uapi/rdma/ib_user_ioctl_verbs.h>
69 
70 #define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN
71 
72 extern struct workqueue_struct *ib_wq;
73 extern struct workqueue_struct *ib_comp_wq;
74 extern struct workqueue_struct *ib_comp_unbound_wq;
75 
76 union ib_gid {
77 	u8	raw[16];
78 	struct {
79 		__be64	subnet_prefix;
80 		__be64	interface_id;
81 	} global;
82 };
83 
84 extern union ib_gid zgid;
85 
86 enum ib_gid_type {
87 	/* If link layer is Ethernet, this is RoCE V1 */
88 	IB_GID_TYPE_IB        = 0,
89 	IB_GID_TYPE_ROCE      = 0,
90 	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
91 	IB_GID_TYPE_SIZE
92 };
93 
94 #define ROCE_V2_UDP_DPORT      4791
95 struct ib_gid_attr {
96 	struct net_device	*ndev;
97 	struct ib_device	*device;
98 	union ib_gid		gid;
99 	enum ib_gid_type	gid_type;
100 	u16			index;
101 	u8			port_num;
102 };
103 
104 enum rdma_node_type {
105 	/* IB values map to NodeInfo:NodeType. */
106 	RDMA_NODE_IB_CA 	= 1,
107 	RDMA_NODE_IB_SWITCH,
108 	RDMA_NODE_IB_ROUTER,
109 	RDMA_NODE_RNIC,
110 	RDMA_NODE_USNIC,
111 	RDMA_NODE_USNIC_UDP,
112 };
113 
114 enum {
115 	/* set the local administered indication */
116 	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
117 };
118 
119 enum rdma_transport_type {
120 	RDMA_TRANSPORT_IB,
121 	RDMA_TRANSPORT_IWARP,
122 	RDMA_TRANSPORT_USNIC,
123 	RDMA_TRANSPORT_USNIC_UDP
124 };
125 
126 enum rdma_protocol_type {
127 	RDMA_PROTOCOL_IB,
128 	RDMA_PROTOCOL_IBOE,
129 	RDMA_PROTOCOL_IWARP,
130 	RDMA_PROTOCOL_USNIC_UDP
131 };
132 
133 __attribute_const__ enum rdma_transport_type
134 rdma_node_get_transport(enum rdma_node_type node_type);
135 
136 enum rdma_network_type {
137 	RDMA_NETWORK_IB,
138 	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
139 	RDMA_NETWORK_IPV4,
140 	RDMA_NETWORK_IPV6
141 };
142 
143 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
144 {
145 	if (network_type == RDMA_NETWORK_IPV4 ||
146 	    network_type == RDMA_NETWORK_IPV6)
147 		return IB_GID_TYPE_ROCE_UDP_ENCAP;
148 
149 	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
150 	return IB_GID_TYPE_IB;
151 }
152 
153 static inline enum rdma_network_type
154 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
155 {
156 	if (attr->gid_type == IB_GID_TYPE_IB)
157 		return RDMA_NETWORK_IB;
158 
159 	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
160 		return RDMA_NETWORK_IPV4;
161 	else
162 		return RDMA_NETWORK_IPV6;
163 }
164 
165 enum rdma_link_layer {
166 	IB_LINK_LAYER_UNSPECIFIED,
167 	IB_LINK_LAYER_INFINIBAND,
168 	IB_LINK_LAYER_ETHERNET,
169 };
170 
171 enum ib_device_cap_flags {
172 	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
173 	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
174 	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
175 	IB_DEVICE_RAW_MULTI			= (1 << 3),
176 	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
177 	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
178 	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
179 	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
180 	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
181 	/* Not in use, former INIT_TYPE		= (1 << 9),*/
182 	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
183 	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
184 	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
185 	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
186 	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
187 
188 	/*
189 	 * This device supports a per-device lkey or stag that can be
190 	 * used without performing a memory registration for the local
191 	 * memory.  Note that ULPs should never check this flag, but
192 	 * instead use the local_dma_lkey field in the ib_pd structure,
193 	 * which will always contain a usable lkey (usage sketch below).
194 	 */
195 	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
196 	/* Reserved, old SEND_W_INV		= (1 << 16),*/
197 	IB_DEVICE_MEM_WINDOW			= (1 << 17),
198 	/*
199 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
200 	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
201 	 * messages and can verify the validity of checksum for
202 	 * incoming messages.  Setting this flag implies that the
203 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
204 	 */
205 	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
206 	IB_DEVICE_UD_TSO			= (1 << 19),
207 	IB_DEVICE_XRC				= (1 << 20),
208 
209 	/*
210 	 * This device supports the IB "base memory management extension",
211 	 * which includes support for fast registrations (IB_WR_REG_MR,
212 	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
213 	 * also be set by any iWarp device which must support FRs to comply
214 	 * with the iWarp verbs spec.  iWarp devices also support the
215 	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
216 	 * stag.
217 	 */
218 	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
219 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
220 	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
221 	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
222 	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
223 	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
224 	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
225 	/*
226 	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
227 	 * support execution of WQEs that involve synchronization
228 	 * of I/O operations with a single completion queue managed
229 	 * by hardware.
230 	 */
231 	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
232 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
233 	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
234 	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
235 	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
236 	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
237 	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
238 	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
239 	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
240 	/* The device supports padding incoming writes to cacheline. */
241 	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
242 };
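
/*
 * Hedged usage sketch for the local_dma_lkey note above ("dma_addr" and
 * "len" are illustrative, not part of this API): a ULP builds a local SGE
 * with the PD-wide lkey instead of registering an MR for kernel memory it
 * already owns:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,		// DMA-mapped buffer address
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,	// always usable, per the note
 *	};
 */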
243 
244 enum ib_signature_prot_cap {
245 	IB_PROT_T10DIF_TYPE_1 = 1,
246 	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
247 	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
248 };
249 
250 enum ib_signature_guard_cap {
251 	IB_GUARD_T10DIF_CRC	= 1,
252 	IB_GUARD_T10DIF_CSUM	= 1 << 1,
253 };
254 
255 enum ib_atomic_cap {
256 	IB_ATOMIC_NONE,
257 	IB_ATOMIC_HCA,
258 	IB_ATOMIC_GLOB
259 };
260 
261 enum ib_odp_general_cap_bits {
262 	IB_ODP_SUPPORT		= 1 << 0,
263 	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
264 };
265 
266 enum ib_odp_transport_cap_bits {
267 	IB_ODP_SUPPORT_SEND	= 1 << 0,
268 	IB_ODP_SUPPORT_RECV	= 1 << 1,
269 	IB_ODP_SUPPORT_WRITE	= 1 << 2,
270 	IB_ODP_SUPPORT_READ	= 1 << 3,
271 	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
272 };
273 
274 struct ib_odp_caps {
275 	uint64_t general_caps;
276 	struct {
277 		uint32_t  rc_odp_caps;
278 		uint32_t  uc_odp_caps;
279 		uint32_t  ud_odp_caps;
280 	} per_transport_caps;
281 };
282 
283 struct ib_rss_caps {
284 	/* Corresponding bit will be set if qp type from
285 	 * 'enum ib_qp_type' is supported, e.g.
286 	 * supported_qpts |= 1 << IB_QPT_UD
287 	 */
288 	u32 supported_qpts;
289 	u32 max_rwq_indirection_tables;
290 	u32 max_rwq_indirection_table_size;
291 };
292 
293 enum ib_tm_cap_flags {
294 	/*  Support tag matching with rendezvous offload for RC transport */
295 	IB_TM_CAP_RNDV_RC = 1 << 0,
296 };
297 
298 struct ib_tm_caps {
299 	/* Max size of RNDV header */
300 	u32 max_rndv_hdr_size;
301 	/* Max number of entries in tag matching list */
302 	u32 max_num_tags;
303 	/* From enum ib_tm_cap_flags */
304 	u32 flags;
305 	/* Max number of outstanding list operations */
306 	u32 max_ops;
307 	/* Max number of SGE in tag matching entry */
308 	u32 max_sge;
309 };
310 
311 struct ib_cq_init_attr {
312 	unsigned int	cqe;
313 	u32		comp_vector;
314 	u32		flags;
315 };
316 
317 enum ib_cq_attr_mask {
318 	IB_CQ_MODERATE = 1 << 0,
319 };
320 
321 struct ib_cq_caps {
322 	u16     max_cq_moderation_count;
323 	u16     max_cq_moderation_period;
324 };
325 
326 struct ib_dm_mr_attr {
327 	u64		length;
328 	u64		offset;
329 	u32		access_flags;
330 };
331 
332 struct ib_dm_alloc_attr {
333 	u64	length;
334 	u32	alignment;
335 	u32	flags;
336 };
337 
338 struct ib_device_attr {
339 	u64			fw_ver;
340 	__be64			sys_image_guid;
341 	u64			max_mr_size;
342 	u64			page_size_cap;
343 	u32			vendor_id;
344 	u32			vendor_part_id;
345 	u32			hw_ver;
346 	int			max_qp;
347 	int			max_qp_wr;
348 	u64			device_cap_flags;
349 	int			max_send_sge;
350 	int			max_recv_sge;
351 	int			max_sge_rd;
352 	int			max_cq;
353 	int			max_cqe;
354 	int			max_mr;
355 	int			max_pd;
356 	int			max_qp_rd_atom;
357 	int			max_ee_rd_atom;
358 	int			max_res_rd_atom;
359 	int			max_qp_init_rd_atom;
360 	int			max_ee_init_rd_atom;
361 	enum ib_atomic_cap	atomic_cap;
362 	enum ib_atomic_cap	masked_atomic_cap;
363 	int			max_ee;
364 	int			max_rdd;
365 	int			max_mw;
366 	int			max_raw_ipv6_qp;
367 	int			max_raw_ethy_qp;
368 	int			max_mcast_grp;
369 	int			max_mcast_qp_attach;
370 	int			max_total_mcast_qp_attach;
371 	int			max_ah;
372 	int			max_fmr;
373 	int			max_map_per_fmr;
374 	int			max_srq;
375 	int			max_srq_wr;
376 	int			max_srq_sge;
377 	unsigned int		max_fast_reg_page_list_len;
378 	u16			max_pkeys;
379 	u8			local_ca_ack_delay;
380 	int			sig_prot_cap;
381 	int			sig_guard_cap;
382 	struct ib_odp_caps	odp_caps;
383 	uint64_t		timestamp_mask;
384 	uint64_t		hca_core_clock; /* in KHZ */
385 	struct ib_rss_caps	rss_caps;
386 	u32			max_wq_type_rq;
387 	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
388 	struct ib_tm_caps	tm_caps;
389 	struct ib_cq_caps       cq_caps;
390 	u64			max_dm_size;
391 };
392 
393 enum ib_mtu {
394 	IB_MTU_256  = 1,
395 	IB_MTU_512  = 2,
396 	IB_MTU_1024 = 3,
397 	IB_MTU_2048 = 4,
398 	IB_MTU_4096 = 5
399 };
400 
401 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
402 {
403 	switch (mtu) {
404 	case IB_MTU_256:  return  256;
405 	case IB_MTU_512:  return  512;
406 	case IB_MTU_1024: return 1024;
407 	case IB_MTU_2048: return 2048;
408 	case IB_MTU_4096: return 4096;
409 	default: 	  return -1;
410 	}
411 }
412 
413 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
414 {
415 	if (mtu >= 4096)
416 		return IB_MTU_4096;
417 	else if (mtu >= 2048)
418 		return IB_MTU_2048;
419 	else if (mtu >= 1024)
420 		return IB_MTU_1024;
421 	else if (mtu >= 512)
422 		return IB_MTU_512;
423 	else
424 		return IB_MTU_256;
425 }
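
/*
 * Illustrative examples of the two MTU helpers above (values follow
 * directly from the definitions): ib_mtu_int_to_enum(3000) rounds down to
 * IB_MTU_2048, and ib_mtu_enum_to_int(IB_MTU_2048) returns 2048, so
 * converting an arbitrary byte count and back yields the largest IB MTU
 * not exceeding it.
 */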
426 
427 enum ib_port_state {
428 	IB_PORT_NOP		= 0,
429 	IB_PORT_DOWN		= 1,
430 	IB_PORT_INIT		= 2,
431 	IB_PORT_ARMED		= 3,
432 	IB_PORT_ACTIVE		= 4,
433 	IB_PORT_ACTIVE_DEFER	= 5
434 };
435 
436 enum ib_port_width {
437 	IB_WIDTH_1X	= 1,
438 	IB_WIDTH_4X	= 2,
439 	IB_WIDTH_8X	= 4,
440 	IB_WIDTH_12X	= 8
441 };
442 
443 static inline int ib_width_enum_to_int(enum ib_port_width width)
444 {
445 	switch (width) {
446 	case IB_WIDTH_1X:  return  1;
447 	case IB_WIDTH_4X:  return  4;
448 	case IB_WIDTH_8X:  return  8;
449 	case IB_WIDTH_12X: return 12;
450 	default: 	  return -1;
451 	}
452 }
453 
454 enum ib_port_speed {
455 	IB_SPEED_SDR	= 1,
456 	IB_SPEED_DDR	= 2,
457 	IB_SPEED_QDR	= 4,
458 	IB_SPEED_FDR10	= 8,
459 	IB_SPEED_FDR	= 16,
460 	IB_SPEED_EDR	= 32,
461 	IB_SPEED_HDR	= 64
462 };
463 
464 /**
465  * struct rdma_hw_stats
466  * @lock - Mutex to protect parallel write access to lifespan and values
467  *    of counters, which are 64 bits and not guaranteed to be written
468  *    atomically on 32-bit systems.
469  * @timestamp - Used by the core code to track when the last update was
470  * @lifespan - Used by the core code to determine how old the counters
471  *   should be before being updated again.  Stored in jiffies, defaults
472  *   to 10 milliseconds; drivers can override the default by specifying
473  *   their own value during their allocation routine.
474  * @names - Array of pointers to static names used for the counters in the
475  *   sysfs directory.
476  * @num_counters - How many hardware counters there are.  If names is
477  *   shorter than this number, a kernel oops will result.  Driver authors
478  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
479  *   in their code to prevent this.
480  * @value - Array of u64 counters that are accessed by the sysfs code and
481  *   filled in by the driver's get_stats routine
482  */
483 struct rdma_hw_stats {
484 	struct mutex	lock; /* Protect lifespan and values[] */
485 	unsigned long	timestamp;
486 	unsigned long	lifespan;
487 	const char * const *names;
488 	int		num_counters;
489 	u64		value[];
490 };
491 
492 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
493 /**
494  * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
495  *   for drivers.
496  * @names - Array of static const char *
497  * @num_counters - How many elements in array
498  * @lifespan - How many milliseconds between updates
499  */
500 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
501 		const char * const *names, int num_counters,
502 		unsigned long lifespan)
503 {
504 	struct rdma_hw_stats *stats;
505 
506 	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
507 			GFP_KERNEL);
508 	if (!stats)
509 		return NULL;
510 	stats->names = names;
511 	stats->num_counters = num_counters;
512 	stats->lifespan = msecs_to_jiffies(lifespan);
513 
514 	return stats;
515 }
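
/*
 * Hedged sketch of how a driver might use the helper above from its
 * alloc_hw_stats callback; the "foo_" names are hypothetical:
 *
 *	static const char * const foo_counter_names[] = {
 *		"rx_packets",
 *		"tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *dev,
 *							 u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *					ARRAY_SIZE(foo_counter_names),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 *
 * Passing ARRAY_SIZE() as num_counters keeps the names array and the
 * counter count in sync, which is what the BUILD_BUG_ON advice in the
 * struct rdma_hw_stats comment guards against.
 */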
516 
517 
518 /* Define bits for the various functionality this port needs the core to
519  * support.  A usage sketch follows the composite masks below.
520  */
521 /* Management                           0x00000FFF */
522 #define RDMA_CORE_CAP_IB_MAD            0x00000001
523 #define RDMA_CORE_CAP_IB_SMI            0x00000002
524 #define RDMA_CORE_CAP_IB_CM             0x00000004
525 #define RDMA_CORE_CAP_IW_CM             0x00000008
526 #define RDMA_CORE_CAP_IB_SA             0x00000010
527 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
528 
529 /* Address format                       0x000FF000 */
530 #define RDMA_CORE_CAP_AF_IB             0x00001000
531 #define RDMA_CORE_CAP_ETH_AH            0x00002000
532 #define RDMA_CORE_CAP_OPA_AH            0x00004000
533 #define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
534 
535 /* Protocol                             0xFFF00000 */
536 #define RDMA_CORE_CAP_PROT_IB           0x00100000
537 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
538 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
539 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
540 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
541 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
542 
543 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
544 					| RDMA_CORE_CAP_PROT_ROCE     \
545 					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
546 
547 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
548 					| RDMA_CORE_CAP_IB_MAD \
549 					| RDMA_CORE_CAP_IB_SMI \
550 					| RDMA_CORE_CAP_IB_CM  \
551 					| RDMA_CORE_CAP_IB_SA  \
552 					| RDMA_CORE_CAP_AF_IB)
553 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
554 					| RDMA_CORE_CAP_IB_MAD  \
555 					| RDMA_CORE_CAP_IB_CM   \
556 					| RDMA_CORE_CAP_AF_IB   \
557 					| RDMA_CORE_CAP_ETH_AH)
558 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
559 					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
560 					| RDMA_CORE_CAP_IB_MAD  \
561 					| RDMA_CORE_CAP_IB_CM   \
562 					| RDMA_CORE_CAP_AF_IB   \
563 					| RDMA_CORE_CAP_ETH_AH)
564 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
565 					| RDMA_CORE_CAP_IW_CM)
566 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
567 					| RDMA_CORE_CAP_OPA_MAD)
568 
569 #define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)
570 
571 #define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)
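
/*
 * Hedged sketch (assuming the per-port immutable-data convention used
 * elsewhere in this header): a RoCE driver typically reports one of the
 * composite masks above, e.g.
 *
 *	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 *
 * and the rdma_cap_*() helpers later in this file test individual
 * RDMA_CORE_CAP_* bits against that value.
 */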
572 
573 struct ib_port_attr {
574 	u64			subnet_prefix;
575 	enum ib_port_state	state;
576 	enum ib_mtu		max_mtu;
577 	enum ib_mtu		active_mtu;
578 	int			gid_tbl_len;
579 	unsigned int		ip_gids:1;
580 	/* This is the value from PortInfo CapabilityMask, defined by IBA */
581 	u32			port_cap_flags;
582 	u32			max_msg_sz;
583 	u32			bad_pkey_cntr;
584 	u32			qkey_viol_cntr;
585 	u16			pkey_tbl_len;
586 	u32			sm_lid;
587 	u32			lid;
588 	u8			lmc;
589 	u8			max_vl_num;
590 	u8			sm_sl;
591 	u8			subnet_timeout;
592 	u8			init_type_reply;
593 	u8			active_width;
594 	u8			active_speed;
595 	u8                      phys_state;
596 };
597 
598 enum ib_device_modify_flags {
599 	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
600 	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
601 };
602 
603 #define IB_DEVICE_NODE_DESC_MAX 64
604 
605 struct ib_device_modify {
606 	u64	sys_image_guid;
607 	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
608 };
609 
610 enum ib_port_modify_flags {
611 	IB_PORT_SHUTDOWN		= 1,
612 	IB_PORT_INIT_TYPE		= (1<<2),
613 	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
614 	IB_PORT_OPA_MASK_CHG		= (1<<4)
615 };
616 
617 struct ib_port_modify {
618 	u32	set_port_cap_mask;
619 	u32	clr_port_cap_mask;
620 	u8	init_type;
621 };
622 
623 enum ib_event_type {
624 	IB_EVENT_CQ_ERR,
625 	IB_EVENT_QP_FATAL,
626 	IB_EVENT_QP_REQ_ERR,
627 	IB_EVENT_QP_ACCESS_ERR,
628 	IB_EVENT_COMM_EST,
629 	IB_EVENT_SQ_DRAINED,
630 	IB_EVENT_PATH_MIG,
631 	IB_EVENT_PATH_MIG_ERR,
632 	IB_EVENT_DEVICE_FATAL,
633 	IB_EVENT_PORT_ACTIVE,
634 	IB_EVENT_PORT_ERR,
635 	IB_EVENT_LID_CHANGE,
636 	IB_EVENT_PKEY_CHANGE,
637 	IB_EVENT_SM_CHANGE,
638 	IB_EVENT_SRQ_ERR,
639 	IB_EVENT_SRQ_LIMIT_REACHED,
640 	IB_EVENT_QP_LAST_WQE_REACHED,
641 	IB_EVENT_CLIENT_REREGISTER,
642 	IB_EVENT_GID_CHANGE,
643 	IB_EVENT_WQ_FATAL,
644 };
645 
646 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
647 
648 struct ib_event {
649 	struct ib_device	*device;
650 	union {
651 		struct ib_cq	*cq;
652 		struct ib_qp	*qp;
653 		struct ib_srq	*srq;
654 		struct ib_wq	*wq;
655 		u8		port_num;
656 	} element;
657 	enum ib_event_type	event;
658 };
659 
660 struct ib_event_handler {
661 	struct ib_device *device;
662 	void            (*handler)(struct ib_event_handler *, struct ib_event *);
663 	struct list_head  list;
664 };
665 
666 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
667 	do {							\
668 		(_ptr)->device  = _device;			\
669 		(_ptr)->handler = _handler;			\
670 		INIT_LIST_HEAD(&(_ptr)->list);			\
671 	} while (0)
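
/*
 * Hedged usage sketch for the macro above ("my_" names are illustrative;
 * ib_register_event_handler() is declared further down in this header):
 *
 *	static struct ib_event_handler my_handler;
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 *
 * Which member of event->element is valid depends on the event type
 * (e.g. element.port_num for port events).
 */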
672 
673 struct ib_global_route {
674 	const struct ib_gid_attr *sgid_attr;
675 	union ib_gid	dgid;
676 	u32		flow_label;
677 	u8		sgid_index;
678 	u8		hop_limit;
679 	u8		traffic_class;
680 };
681 
682 struct ib_grh {
683 	__be32		version_tclass_flow;
684 	__be16		paylen;
685 	u8		next_hdr;
686 	u8		hop_limit;
687 	union ib_gid	sgid;
688 	union ib_gid	dgid;
689 };
690 
691 union rdma_network_hdr {
692 	struct ib_grh ibgrh;
693 	struct {
694 		/* The IB spec states that if the network header is IPv4, it
695 		 * occupies the last 20 bytes of the 40-byte GRH space.
696 		 */
697 		u8		reserved[20];
698 		struct iphdr	roce4grh;
699 	};
700 };
701 
702 #define IB_QPN_MASK		0xFFFFFF
703 
704 enum {
705 	IB_MULTICAST_QPN = 0xffffff
706 };
707 
708 #define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
709 #define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
710 
711 enum ib_ah_flags {
712 	IB_AH_GRH	= 1
713 };
714 
715 enum ib_rate {
716 	IB_RATE_PORT_CURRENT = 0,
717 	IB_RATE_2_5_GBPS = 2,
718 	IB_RATE_5_GBPS   = 5,
719 	IB_RATE_10_GBPS  = 3,
720 	IB_RATE_20_GBPS  = 6,
721 	IB_RATE_30_GBPS  = 4,
722 	IB_RATE_40_GBPS  = 7,
723 	IB_RATE_60_GBPS  = 8,
724 	IB_RATE_80_GBPS  = 9,
725 	IB_RATE_120_GBPS = 10,
726 	IB_RATE_14_GBPS  = 11,
727 	IB_RATE_56_GBPS  = 12,
728 	IB_RATE_112_GBPS = 13,
729 	IB_RATE_168_GBPS = 14,
730 	IB_RATE_25_GBPS  = 15,
731 	IB_RATE_100_GBPS = 16,
732 	IB_RATE_200_GBPS = 17,
733 	IB_RATE_300_GBPS = 18
734 };
735 
736 /**
737  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
738  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
739  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
740  * @rate: rate to convert.
741  */
742 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
743 
744 /**
745  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
746  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
747  * @rate: rate to convert.
748  */
749 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
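
/*
 * Worked examples of the two conversions above, following the stated
 * 2.5 Gbit/sec base rate: ib_rate_to_mult(IB_RATE_40_GBPS) returns 16
 * (40 = 16 * 2.5), and ib_rate_to_mbps(IB_RATE_40_GBPS) returns 40000.
 */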
750 
751 
752 /**
753  * enum ib_mr_type - memory region type
754  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
755  *                            normal registration
756  * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
757  *                            signature operations (data-integrity
758  *                            capable regions)
759  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
760  *                            register any arbitrary sg lists (without
761  *                            the normal mr constraints - see
762  *                            ib_map_mr_sg)
763  */
764 enum ib_mr_type {
765 	IB_MR_TYPE_MEM_REG,
766 	IB_MR_TYPE_SIGNATURE,
767 	IB_MR_TYPE_SG_GAPS,
768 };
769 
770 /**
771  * Signature types
772  * IB_SIG_TYPE_NONE: Unprotected.
773  * IB_SIG_TYPE_T10_DIF: Type T10-DIF
774  */
775 enum ib_signature_type {
776 	IB_SIG_TYPE_NONE,
777 	IB_SIG_TYPE_T10_DIF,
778 };
779 
780 /**
781  * Signature T10-DIF block-guard types
782  * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
783  * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
784  */
785 enum ib_t10_dif_bg_type {
786 	IB_T10DIF_CRC,
787 	IB_T10DIF_CSUM
788 };
789 
790 /**
791  * struct ib_t10_dif_domain - Parameters specific for T10-DIF
792  *     domain.
793  * @bg_type: T10-DIF block guard type (CRC|CSUM)
794  * @pi_interval: protection information interval.
795  * @bg: seed of guard computation.
796  * @app_tag: application tag of guard block
797  * @ref_tag: initial guard block reference tag.
798  * @ref_remap: Indicate whether the reftag increments each block
799  * @app_escape: Indicate to skip block check if apptag=0xffff
800  * @ref_escape: Indicate to skip block check if reftag=0xffffffff
801  * @apptag_check_mask: check bitmask of application tag.
802  */
803 struct ib_t10_dif_domain {
804 	enum ib_t10_dif_bg_type bg_type;
805 	u16			pi_interval;
806 	u16			bg;
807 	u16			app_tag;
808 	u32			ref_tag;
809 	bool			ref_remap;
810 	bool			app_escape;
811 	bool			ref_escape;
812 	u16			apptag_check_mask;
813 };
814 
815 /**
816  * struct ib_sig_domain - Parameters for signature domain
817  * @sig_type: specific signature type
818  * @sig: union of all signature domain attributes that may
819  *     be used to set domain layout.
820  */
821 struct ib_sig_domain {
822 	enum ib_signature_type sig_type;
823 	union {
824 		struct ib_t10_dif_domain dif;
825 	} sig;
826 };
827 
828 /**
829  * struct ib_sig_attrs - Parameters for signature handover operation
830  * @check_mask: bitmask for signature byte check (8 bytes)
831  * @mem: memory domain layout descriptor.
832  * @wire: wire domain layout descriptor.
833  */
834 struct ib_sig_attrs {
835 	u8			check_mask;
836 	struct ib_sig_domain	mem;
837 	struct ib_sig_domain	wire;
838 };
839 
840 enum ib_sig_err_type {
841 	IB_SIG_BAD_GUARD,
842 	IB_SIG_BAD_REFTAG,
843 	IB_SIG_BAD_APPTAG,
844 };
845 
846 /**
847  * Signature check masks (8 bytes in total) according to the T10-PI standard:
848  *  -------- -------- ------------
849  * | GUARD  | APPTAG |   REFTAG   |
850  * |  2B    |  2B    |    4B      |
851  *  -------- -------- ------------
852  */
853 enum {
854 	IB_SIG_CHECK_GUARD	= 0xc0,
855 	IB_SIG_CHECK_APPTAG	= 0x30,
856 	IB_SIG_CHECK_REFTAG	= 0x0f,
857 };
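
/*
 * Example (hedged): a consumer that wants the HCA to verify only the
 * guard and reference tag, but not the application tag, would set
 *
 *	sig_attrs->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
 *
 * which matches the byte layout shown above (2B guard, 2B apptag,
 * 4B reftag).
 */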
858 
859 /**
860  * struct ib_sig_err - signature error descriptor
861  */
862 struct ib_sig_err {
863 	enum ib_sig_err_type	err_type;
864 	u32			expected;
865 	u32			actual;
866 	u64			sig_err_offset;
867 	u32			key;
868 };
869 
870 enum ib_mr_status_check {
871 	IB_MR_CHECK_SIG_STATUS = 1,
872 };
873 
874 /**
875  * struct ib_mr_status - Memory region status container
876  *
877  * @fail_status: Bitmask of MR checks status. For each
878  *     failed check a corresponding status bit is set.
879  * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
880  *     failure.
881  */
882 struct ib_mr_status {
883 	u32		    fail_status;
884 	struct ib_sig_err   sig_err;
885 };
886 
887 /**
888  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
889  * enum.
890  * @mult: multiple to convert.
891  */
892 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
893 
894 enum rdma_ah_attr_type {
895 	RDMA_AH_ATTR_TYPE_UNDEFINED,
896 	RDMA_AH_ATTR_TYPE_IB,
897 	RDMA_AH_ATTR_TYPE_ROCE,
898 	RDMA_AH_ATTR_TYPE_OPA,
899 };
900 
901 struct ib_ah_attr {
902 	u16			dlid;
903 	u8			src_path_bits;
904 };
905 
906 struct roce_ah_attr {
907 	u8			dmac[ETH_ALEN];
908 };
909 
910 struct opa_ah_attr {
911 	u32			dlid;
912 	u8			src_path_bits;
913 	bool			make_grd;
914 };
915 
916 struct rdma_ah_attr {
917 	struct ib_global_route	grh;
918 	u8			sl;
919 	u8			static_rate;
920 	u8			port_num;
921 	u8			ah_flags;
922 	enum rdma_ah_attr_type type;
923 	union {
924 		struct ib_ah_attr ib;
925 		struct roce_ah_attr roce;
926 		struct opa_ah_attr opa;
927 	};
928 };
929 
930 enum ib_wc_status {
931 	IB_WC_SUCCESS,
932 	IB_WC_LOC_LEN_ERR,
933 	IB_WC_LOC_QP_OP_ERR,
934 	IB_WC_LOC_EEC_OP_ERR,
935 	IB_WC_LOC_PROT_ERR,
936 	IB_WC_WR_FLUSH_ERR,
937 	IB_WC_MW_BIND_ERR,
938 	IB_WC_BAD_RESP_ERR,
939 	IB_WC_LOC_ACCESS_ERR,
940 	IB_WC_REM_INV_REQ_ERR,
941 	IB_WC_REM_ACCESS_ERR,
942 	IB_WC_REM_OP_ERR,
943 	IB_WC_RETRY_EXC_ERR,
944 	IB_WC_RNR_RETRY_EXC_ERR,
945 	IB_WC_LOC_RDD_VIOL_ERR,
946 	IB_WC_REM_INV_RD_REQ_ERR,
947 	IB_WC_REM_ABORT_ERR,
948 	IB_WC_INV_EECN_ERR,
949 	IB_WC_INV_EEC_STATE_ERR,
950 	IB_WC_FATAL_ERR,
951 	IB_WC_RESP_TIMEOUT_ERR,
952 	IB_WC_GENERAL_ERR
953 };
954 
955 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
956 
957 enum ib_wc_opcode {
958 	IB_WC_SEND,
959 	IB_WC_RDMA_WRITE,
960 	IB_WC_RDMA_READ,
961 	IB_WC_COMP_SWAP,
962 	IB_WC_FETCH_ADD,
963 	IB_WC_LSO,
964 	IB_WC_LOCAL_INV,
965 	IB_WC_REG_MR,
966 	IB_WC_MASKED_COMP_SWAP,
967 	IB_WC_MASKED_FETCH_ADD,
968 /*
969  * Set value of IB_WC_RECV so consumers can test if a completion is a
970  * receive by testing (opcode & IB_WC_RECV).
971  */
972 	IB_WC_RECV			= 1 << 7,
973 	IB_WC_RECV_RDMA_WITH_IMM
974 };
975 
976 enum ib_wc_flags {
977 	IB_WC_GRH		= 1,
978 	IB_WC_WITH_IMM		= (1<<1),
979 	IB_WC_WITH_INVALIDATE	= (1<<2),
980 	IB_WC_IP_CSUM_OK	= (1<<3),
981 	IB_WC_WITH_SMAC		= (1<<4),
982 	IB_WC_WITH_VLAN		= (1<<5),
983 	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
984 };
985 
986 struct ib_wc {
987 	union {
988 		u64		wr_id;
989 		struct ib_cqe	*wr_cqe;
990 	};
991 	enum ib_wc_status	status;
992 	enum ib_wc_opcode	opcode;
993 	u32			vendor_err;
994 	u32			byte_len;
995 	struct ib_qp	       *qp;
996 	union {
997 		__be32		imm_data;
998 		u32		invalidate_rkey;
999 	} ex;
1000 	u32			src_qp;
1001 	u32			slid;
1002 	int			wc_flags;
1003 	u16			pkey_index;
1004 	u8			sl;
1005 	u8			dlid_path_bits;
1006 	u8			port_num;	/* valid only for DR SMPs on switches */
1007 	u8			smac[ETH_ALEN];
1008 	u16			vlan_id;
1009 	u8			network_hdr_type;
1010 };
1011 
1012 enum ib_cq_notify_flags {
1013 	IB_CQ_SOLICITED			= 1 << 0,
1014 	IB_CQ_NEXT_COMP			= 1 << 1,
1015 	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1016 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
1017 };
1018 
1019 enum ib_srq_type {
1020 	IB_SRQT_BASIC,
1021 	IB_SRQT_XRC,
1022 	IB_SRQT_TM,
1023 };
1024 
1025 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1026 {
1027 	return srq_type == IB_SRQT_XRC ||
1028 	       srq_type == IB_SRQT_TM;
1029 }
1030 
1031 enum ib_srq_attr_mask {
1032 	IB_SRQ_MAX_WR	= 1 << 0,
1033 	IB_SRQ_LIMIT	= 1 << 1,
1034 };
1035 
1036 struct ib_srq_attr {
1037 	u32	max_wr;
1038 	u32	max_sge;
1039 	u32	srq_limit;
1040 };
1041 
1042 struct ib_srq_init_attr {
1043 	void		      (*event_handler)(struct ib_event *, void *);
1044 	void		       *srq_context;
1045 	struct ib_srq_attr	attr;
1046 	enum ib_srq_type	srq_type;
1047 
1048 	struct {
1049 		struct ib_cq   *cq;
1050 		union {
1051 			struct {
1052 				struct ib_xrcd *xrcd;
1053 			} xrc;
1054 
1055 			struct {
1056 				u32		max_num_tags;
1057 			} tag_matching;
1058 		};
1059 	} ext;
1060 };
1061 
1062 struct ib_qp_cap {
1063 	u32	max_send_wr;
1064 	u32	max_recv_wr;
1065 	u32	max_send_sge;
1066 	u32	max_recv_sge;
1067 	u32	max_inline_data;
1068 
1069 	/*
1070 	 * Maximum number of rdma_rw_ctx structures in flight at a time.
1071 	 * ib_create_qp() will calculate the right number of needed WRs
1072 	 * and MRs based on this (see the usage sketch after this struct).
1073 	 */
1074 	u32	max_rdma_ctxs;
1075 };
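
/*
 * Hedged sketch for the max_rdma_ctxs note above ("queue_depth" is an
 * illustrative variable): a ULP that drives RDMA READ/WRITE through the
 * rdma_rw API sizes the QP like
 *
 *	struct ib_qp_init_attr init_attr = { };
 *
 *	init_attr.cap.max_rdma_ctxs = queue_depth;
 *	init_attr.cap.max_recv_wr   = queue_depth;
 *
 * and lets ib_create_qp() translate max_rdma_ctxs into the additional
 * send WRs and MRs the rdma_rw contexts will consume.
 */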
1076 
1077 enum ib_sig_type {
1078 	IB_SIGNAL_ALL_WR,
1079 	IB_SIGNAL_REQ_WR
1080 };
1081 
1082 enum ib_qp_type {
1083 	/*
1084 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1085 	 * here (and in that order) since the MAD layer uses them as
1086 	 * indices into a 2-entry table.
1087 	 */
1088 	IB_QPT_SMI,
1089 	IB_QPT_GSI,
1090 
1091 	IB_QPT_RC,
1092 	IB_QPT_UC,
1093 	IB_QPT_UD,
1094 	IB_QPT_RAW_IPV6,
1095 	IB_QPT_RAW_ETHERTYPE,
1096 	IB_QPT_RAW_PACKET = 8,
1097 	IB_QPT_XRC_INI = 9,
1098 	IB_QPT_XRC_TGT,
1099 	IB_QPT_MAX,
1100 	IB_QPT_DRIVER = 0xFF,
1101 	/* Reserve a range for qp types internal to the low level driver.
1102 	 * These qp types will not be visible at the IB core layer, so the
1103 	 * IB_QPT_MAX usages should not be affected in the core layer
1104 	 */
1105 	IB_QPT_RESERVED1 = 0x1000,
1106 	IB_QPT_RESERVED2,
1107 	IB_QPT_RESERVED3,
1108 	IB_QPT_RESERVED4,
1109 	IB_QPT_RESERVED5,
1110 	IB_QPT_RESERVED6,
1111 	IB_QPT_RESERVED7,
1112 	IB_QPT_RESERVED8,
1113 	IB_QPT_RESERVED9,
1114 	IB_QPT_RESERVED10,
1115 };
1116 
1117 enum ib_qp_create_flags {
1118 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1119 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1120 	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1121 	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1122 	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1123 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1124 	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
1125 	/* FREE					= 1 << 7, */
1126 	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1127 	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
1128 	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
1129 	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
1130 	/* reserve bits 26-31 for low level drivers' internal use */
1131 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1132 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1133 };
1134 
1135 /*
1136  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1137  * callback to destroy the passed in QP.
1138  */
1139 
1140 struct ib_qp_init_attr {
1141 	void                  (*event_handler)(struct ib_event *, void *);
1142 	void		       *qp_context;
1143 	struct ib_cq	       *send_cq;
1144 	struct ib_cq	       *recv_cq;
1145 	struct ib_srq	       *srq;
1146 	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1147 	struct ib_qp_cap	cap;
1148 	enum ib_sig_type	sq_sig_type;
1149 	enum ib_qp_type		qp_type;
1150 	u32			create_flags;
1151 
1152 	/*
1153 	 * Only needed for special QP types, or when using the RW API.
1154 	 */
1155 	u8			port_num;
1156 	struct ib_rwq_ind_table *rwq_ind_tbl;
1157 	u32			source_qpn;
1158 };
1159 
1160 struct ib_qp_open_attr {
1161 	void                  (*event_handler)(struct ib_event *, void *);
1162 	void		       *qp_context;
1163 	u32			qp_num;
1164 	enum ib_qp_type		qp_type;
1165 };
1166 
1167 enum ib_rnr_timeout {
1168 	IB_RNR_TIMER_655_36 =  0,
1169 	IB_RNR_TIMER_000_01 =  1,
1170 	IB_RNR_TIMER_000_02 =  2,
1171 	IB_RNR_TIMER_000_03 =  3,
1172 	IB_RNR_TIMER_000_04 =  4,
1173 	IB_RNR_TIMER_000_06 =  5,
1174 	IB_RNR_TIMER_000_08 =  6,
1175 	IB_RNR_TIMER_000_12 =  7,
1176 	IB_RNR_TIMER_000_16 =  8,
1177 	IB_RNR_TIMER_000_24 =  9,
1178 	IB_RNR_TIMER_000_32 = 10,
1179 	IB_RNR_TIMER_000_48 = 11,
1180 	IB_RNR_TIMER_000_64 = 12,
1181 	IB_RNR_TIMER_000_96 = 13,
1182 	IB_RNR_TIMER_001_28 = 14,
1183 	IB_RNR_TIMER_001_92 = 15,
1184 	IB_RNR_TIMER_002_56 = 16,
1185 	IB_RNR_TIMER_003_84 = 17,
1186 	IB_RNR_TIMER_005_12 = 18,
1187 	IB_RNR_TIMER_007_68 = 19,
1188 	IB_RNR_TIMER_010_24 = 20,
1189 	IB_RNR_TIMER_015_36 = 21,
1190 	IB_RNR_TIMER_020_48 = 22,
1191 	IB_RNR_TIMER_030_72 = 23,
1192 	IB_RNR_TIMER_040_96 = 24,
1193 	IB_RNR_TIMER_061_44 = 25,
1194 	IB_RNR_TIMER_081_92 = 26,
1195 	IB_RNR_TIMER_122_88 = 27,
1196 	IB_RNR_TIMER_163_84 = 28,
1197 	IB_RNR_TIMER_245_76 = 29,
1198 	IB_RNR_TIMER_327_68 = 30,
1199 	IB_RNR_TIMER_491_52 = 31
1200 };
1201 
1202 enum ib_qp_attr_mask {
1203 	IB_QP_STATE			= 1,
1204 	IB_QP_CUR_STATE			= (1<<1),
1205 	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1206 	IB_QP_ACCESS_FLAGS		= (1<<3),
1207 	IB_QP_PKEY_INDEX		= (1<<4),
1208 	IB_QP_PORT			= (1<<5),
1209 	IB_QP_QKEY			= (1<<6),
1210 	IB_QP_AV			= (1<<7),
1211 	IB_QP_PATH_MTU			= (1<<8),
1212 	IB_QP_TIMEOUT			= (1<<9),
1213 	IB_QP_RETRY_CNT			= (1<<10),
1214 	IB_QP_RNR_RETRY			= (1<<11),
1215 	IB_QP_RQ_PSN			= (1<<12),
1216 	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1217 	IB_QP_ALT_PATH			= (1<<14),
1218 	IB_QP_MIN_RNR_TIMER		= (1<<15),
1219 	IB_QP_SQ_PSN			= (1<<16),
1220 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1221 	IB_QP_PATH_MIG_STATE		= (1<<18),
1222 	IB_QP_CAP			= (1<<19),
1223 	IB_QP_DEST_QPN			= (1<<20),
1224 	IB_QP_RESERVED1			= (1<<21),
1225 	IB_QP_RESERVED2			= (1<<22),
1226 	IB_QP_RESERVED3			= (1<<23),
1227 	IB_QP_RESERVED4			= (1<<24),
1228 	IB_QP_RATE_LIMIT		= (1<<25),
1229 };
1230 
1231 enum ib_qp_state {
1232 	IB_QPS_RESET,
1233 	IB_QPS_INIT,
1234 	IB_QPS_RTR,
1235 	IB_QPS_RTS,
1236 	IB_QPS_SQD,
1237 	IB_QPS_SQE,
1238 	IB_QPS_ERR
1239 };
1240 
1241 enum ib_mig_state {
1242 	IB_MIG_MIGRATED,
1243 	IB_MIG_REARM,
1244 	IB_MIG_ARMED
1245 };
1246 
1247 enum ib_mw_type {
1248 	IB_MW_TYPE_1 = 1,
1249 	IB_MW_TYPE_2 = 2
1250 };
1251 
1252 struct ib_qp_attr {
1253 	enum ib_qp_state	qp_state;
1254 	enum ib_qp_state	cur_qp_state;
1255 	enum ib_mtu		path_mtu;
1256 	enum ib_mig_state	path_mig_state;
1257 	u32			qkey;
1258 	u32			rq_psn;
1259 	u32			sq_psn;
1260 	u32			dest_qp_num;
1261 	int			qp_access_flags;
1262 	struct ib_qp_cap	cap;
1263 	struct rdma_ah_attr	ah_attr;
1264 	struct rdma_ah_attr	alt_ah_attr;
1265 	u16			pkey_index;
1266 	u16			alt_pkey_index;
1267 	u8			en_sqd_async_notify;
1268 	u8			sq_draining;
1269 	u8			max_rd_atomic;
1270 	u8			max_dest_rd_atomic;
1271 	u8			min_rnr_timer;
1272 	u8			port_num;
1273 	u8			timeout;
1274 	u8			retry_cnt;
1275 	u8			rnr_retry;
1276 	u8			alt_port_num;
1277 	u8			alt_timeout;
1278 	u32			rate_limit;
1279 };
1280 
1281 enum ib_wr_opcode {
1282 	/* These are shared with userspace */
1283 	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1284 	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1285 	IB_WR_SEND = IB_UVERBS_WR_SEND,
1286 	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1287 	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1288 	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1289 	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1290 	IB_WR_LSO = IB_UVERBS_WR_TSO,
1291 	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1292 	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1293 	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1294 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1295 		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1296 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1297 		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1298 
1299 	/* These are kernel only and can not be issued by userspace */
1300 	IB_WR_REG_MR = 0x20,
1301 	IB_WR_REG_SIG_MR,
1302 
1303 	/* reserve values for low level drivers' internal use.
1304 	 * These values will not be used at all in the ib core layer.
1305 	 */
1306 	IB_WR_RESERVED1 = 0xf0,
1307 	IB_WR_RESERVED2,
1308 	IB_WR_RESERVED3,
1309 	IB_WR_RESERVED4,
1310 	IB_WR_RESERVED5,
1311 	IB_WR_RESERVED6,
1312 	IB_WR_RESERVED7,
1313 	IB_WR_RESERVED8,
1314 	IB_WR_RESERVED9,
1315 	IB_WR_RESERVED10,
1316 };
1317 
1318 enum ib_send_flags {
1319 	IB_SEND_FENCE		= 1,
1320 	IB_SEND_SIGNALED	= (1<<1),
1321 	IB_SEND_SOLICITED	= (1<<2),
1322 	IB_SEND_INLINE		= (1<<3),
1323 	IB_SEND_IP_CSUM		= (1<<4),
1324 
1325 	/* reserve bits 26-31 for low level drivers' internal use */
1326 	IB_SEND_RESERVED_START	= (1 << 26),
1327 	IB_SEND_RESERVED_END	= (1 << 31),
1328 };
1329 
1330 struct ib_sge {
1331 	u64	addr;
1332 	u32	length;
1333 	u32	lkey;
1334 };
1335 
1336 struct ib_cqe {
1337 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1338 };
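
/*
 * Hedged usage sketch for struct ib_cqe ("my_" names are illustrative):
 * CQE-style consumers embed the cqe in their own request context and
 * recover it from the completion with container_of():
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *		void		*buffer;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *
 *		// req->buffer now holds the completed request's data
 *	}
 *
 * The consumer sets req->cqe.done = my_done and points wr_cqe at
 * &req->cqe before posting the work request.
 */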
1339 
1340 struct ib_send_wr {
1341 	struct ib_send_wr      *next;
1342 	union {
1343 		u64		wr_id;
1344 		struct ib_cqe	*wr_cqe;
1345 	};
1346 	struct ib_sge	       *sg_list;
1347 	int			num_sge;
1348 	enum ib_wr_opcode	opcode;
1349 	int			send_flags;
1350 	union {
1351 		__be32		imm_data;
1352 		u32		invalidate_rkey;
1353 	} ex;
1354 };
1355 
1356 struct ib_rdma_wr {
1357 	struct ib_send_wr	wr;
1358 	u64			remote_addr;
1359 	u32			rkey;
1360 };
1361 
1362 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1363 {
1364 	return container_of(wr, struct ib_rdma_wr, wr);
1365 }
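
/*
 * Hedged sketch of filling an RDMA WRITE request using the structures
 * above (sge, remote_addr and remote_rkey are illustrative inputs):
 *
 *	struct ib_rdma_wr wr = { };
 *
 *	wr.wr.opcode	 = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list	 = &sge;
 *	wr.wr.num_sge	 = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr	 = remote_addr;
 *	wr.rkey		 = remote_rkey;
 *
 * On the completion path, rdma_wr() converts the posted ib_send_wr back
 * to the containing ib_rdma_wr.
 */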
1366 
1367 struct ib_atomic_wr {
1368 	struct ib_send_wr	wr;
1369 	u64			remote_addr;
1370 	u64			compare_add;
1371 	u64			swap;
1372 	u64			compare_add_mask;
1373 	u64			swap_mask;
1374 	u32			rkey;
1375 };
1376 
1377 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1378 {
1379 	return container_of(wr, struct ib_atomic_wr, wr);
1380 }
1381 
1382 struct ib_ud_wr {
1383 	struct ib_send_wr	wr;
1384 	struct ib_ah		*ah;
1385 	void			*header;
1386 	int			hlen;
1387 	int			mss;
1388 	u32			remote_qpn;
1389 	u32			remote_qkey;
1390 	u16			pkey_index; /* valid for GSI only */
1391 	u8			port_num;   /* valid for DR SMPs on switch only */
1392 };
1393 
1394 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1395 {
1396 	return container_of(wr, struct ib_ud_wr, wr);
1397 }
1398 
1399 struct ib_reg_wr {
1400 	struct ib_send_wr	wr;
1401 	struct ib_mr		*mr;
1402 	u32			key;
1403 	int			access;
1404 };
1405 
1406 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1407 {
1408 	return container_of(wr, struct ib_reg_wr, wr);
1409 }
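
/*
 * Hedged sketch of a fast-registration work request using ib_reg_wr
 * (assumes the MR was previously mapped with ib_map_mr_sg(), declared
 * later in this header; the access flags are illustrative):
 *
 *	struct ib_reg_wr reg = { };
 *
 *	reg.wr.opcode = IB_WR_REG_MR;
 *	reg.mr	      = mr;
 *	reg.key	      = mr->rkey;
 *	reg.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 */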
1410 
1411 struct ib_sig_handover_wr {
1412 	struct ib_send_wr	wr;
1413 	struct ib_sig_attrs    *sig_attrs;
1414 	struct ib_mr	       *sig_mr;
1415 	int			access_flags;
1416 	struct ib_sge	       *prot;
1417 };
1418 
1419 static inline const struct ib_sig_handover_wr *
1420 sig_handover_wr(const struct ib_send_wr *wr)
1421 {
1422 	return container_of(wr, struct ib_sig_handover_wr, wr);
1423 }
1424 
1425 struct ib_recv_wr {
1426 	struct ib_recv_wr      *next;
1427 	union {
1428 		u64		wr_id;
1429 		struct ib_cqe	*wr_cqe;
1430 	};
1431 	struct ib_sge	       *sg_list;
1432 	int			num_sge;
1433 };
1434 
1435 enum ib_access_flags {
1436 	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1437 	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1438 	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1439 	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1440 	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1441 	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1442 	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1443 	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1444 
1445 	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1446 };
1447 
1448 /*
1449  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1450  * are hidden here instead of a uapi header!
1451  */
1452 enum ib_mr_rereg_flags {
1453 	IB_MR_REREG_TRANS	= 1,
1454 	IB_MR_REREG_PD		= (1<<1),
1455 	IB_MR_REREG_ACCESS	= (1<<2),
1456 	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1457 };
1458 
1459 struct ib_fmr_attr {
1460 	int	max_pages;
1461 	int	max_maps;
1462 	u8	page_shift;
1463 };
1464 
1465 struct ib_umem;
1466 
1467 enum rdma_remove_reason {
1468 	/*
1469 	 * Userspace requested uobject deletion or initial try
1470 	 * to remove uobject via cleanup. Call could fail
1471 	 */
1472 	RDMA_REMOVE_DESTROY,
1473 	/* Context deletion. This call should delete the actual object itself */
1474 	RDMA_REMOVE_CLOSE,
1475 	/* Driver is being hot-unplugged. This call should delete the actual object itself */
1476 	RDMA_REMOVE_DRIVER_REMOVE,
1477 	/* uobj is being cleaned-up before being committed */
1478 	RDMA_REMOVE_ABORT,
1479 };
1480 
1481 struct ib_rdmacg_object {
1482 #ifdef CONFIG_CGROUP_RDMA
1483 	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
1484 #endif
1485 };
1486 
1487 struct ib_ucontext {
1488 	struct ib_device       *device;
1489 	struct ib_uverbs_file  *ufile;
1490 	/*
1491 	 * 'closing' can be read by the driver only during a destroy callback;
1492 	 * it is set when we are closing the file descriptor and indicates
1493 	 * that mm_sem may be locked.
1494 	 */
1495 	int			closing;
1496 
1497 	bool cleanup_retryable;
1498 
1499 	struct pid             *tgid;
1500 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1501 	struct rb_root_cached   umem_tree;
1502 	/*
1503 	 * Protects umem_tree, as well as odp_mrs_count and
1504 	 * mmu notifier registration.
1505 	 */
1506 	struct rw_semaphore	umem_rwsem;
1507 	void (*invalidate_range)(struct ib_umem *umem,
1508 				 unsigned long start, unsigned long end);
1509 
1510 	struct mmu_notifier	mn;
1511 	atomic_t		notifier_count;
1512 	/* A list of umems that don't have private mmu notifier counters yet. */
1513 	struct list_head	no_private_counters;
1514 	int                     odp_mrs_count;
1515 #endif
1516 
1517 	struct ib_rdmacg_object	cg_obj;
1518 };
1519 
1520 struct ib_uobject {
1521 	u64			user_handle;	/* handle given to us by userspace */
1522 	/* ufile & ucontext owning this object */
1523 	struct ib_uverbs_file  *ufile;
1524 	/* FIXME, save memory: ufile->context == context */
1525 	struct ib_ucontext     *context;	/* associated user context */
1526 	void		       *object;		/* containing object */
1527 	struct list_head	list;		/* link to context's list */
1528 	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
1529 	int			id;		/* index into kernel idr */
1530 	struct kref		ref;
1531 	atomic_t		usecnt;		/* protects exclusive access */
1532 	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1533 
1534 	const struct uverbs_api_object *uapi_object;
1535 };
1536 
1537 struct ib_udata {
1538 	const void __user *inbuf;
1539 	void __user *outbuf;
1540 	size_t       inlen;
1541 	size_t       outlen;
1542 };
1543 
1544 struct ib_pd {
1545 	u32			local_dma_lkey;
1546 	u32			flags;
1547 	struct ib_device       *device;
1548 	struct ib_uobject      *uobject;
1549 	atomic_t          	usecnt; /* count all resources */
1550 
1551 	u32			unsafe_global_rkey;
1552 
1553 	/*
1554 	 * Implementation details of the RDMA core, don't use in drivers:
1555 	 */
1556 	struct ib_mr	       *__internal_mr;
1557 	struct rdma_restrack_entry res;
1558 };
1559 
1560 struct ib_xrcd {
1561 	struct ib_device       *device;
1562 	atomic_t		usecnt; /* count all exposed resources */
1563 	struct inode	       *inode;
1564 
1565 	struct mutex		tgt_qp_mutex;
1566 	struct list_head	tgt_qp_list;
1567 };
1568 
1569 struct ib_ah {
1570 	struct ib_device	*device;
1571 	struct ib_pd		*pd;
1572 	struct ib_uobject	*uobject;
1573 	const struct ib_gid_attr *sgid_attr;
1574 	enum rdma_ah_attr_type	type;
1575 };
1576 
1577 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1578 
1579 enum ib_poll_context {
1580 	IB_POLL_DIRECT,		   /* caller context, no hw completions */
1581 	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
1582 	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
1583 	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1584 };
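
/*
 * Hedged sketch (ib_alloc_cq() is declared later in this header): a ULP
 * that wants its completions processed from a workqueue rather than
 * softirq context would allocate its CQ as
 *
 *	cq = ib_alloc_cq(device, priv, nr_cqe, comp_vector,
 *			 IB_POLL_WORKQUEUE);
 *
 * while latency-sensitive callers that poll explicitly use IB_POLL_DIRECT
 * and call ib_process_cq_direct() themselves.
 */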
1585 
1586 struct ib_cq {
1587 	struct ib_device       *device;
1588 	struct ib_uobject      *uobject;
1589 	ib_comp_handler   	comp_handler;
1590 	void                  (*event_handler)(struct ib_event *, void *);
1591 	void                   *cq_context;
1592 	int               	cqe;
1593 	atomic_t          	usecnt; /* count number of work queues */
1594 	enum ib_poll_context	poll_ctx;
1595 	struct ib_wc		*wc;
1596 	union {
1597 		struct irq_poll		iop;
1598 		struct work_struct	work;
1599 	};
1600 	struct workqueue_struct *comp_wq;
1601 	/*
1602 	 * Implementation details of the RDMA core, don't use in drivers:
1603 	 */
1604 	struct rdma_restrack_entry res;
1605 };
1606 
1607 struct ib_srq {
1608 	struct ib_device       *device;
1609 	struct ib_pd	       *pd;
1610 	struct ib_uobject      *uobject;
1611 	void		      (*event_handler)(struct ib_event *, void *);
1612 	void		       *srq_context;
1613 	enum ib_srq_type	srq_type;
1614 	atomic_t		usecnt;
1615 
1616 	struct {
1617 		struct ib_cq   *cq;
1618 		union {
1619 			struct {
1620 				struct ib_xrcd *xrcd;
1621 				u32		srq_num;
1622 			} xrc;
1623 		};
1624 	} ext;
1625 };
1626 
1627 enum ib_raw_packet_caps {
1628 	/* Stripping the cvlan from an incoming packet and reporting it in the
1629 	 * matching work completion is supported.
1630 	 */
1631 	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
1632 	/* Scattering the FCS field of an incoming packet to host memory is supported.
1633 	 */
1634 	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
1635 	/* Checksum offloads are supported (for both send and receive). */
1636 	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
1637 	/* When a packet is received for an RQ with no receive WQEs, the
1638 	 * packet processing is delayed.
1639 	 */
1640 	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
1641 };
1642 
1643 enum ib_wq_type {
1644 	IB_WQT_RQ
1645 };
1646 
1647 enum ib_wq_state {
1648 	IB_WQS_RESET,
1649 	IB_WQS_RDY,
1650 	IB_WQS_ERR
1651 };
1652 
1653 struct ib_wq {
1654 	struct ib_device       *device;
1655 	struct ib_uobject      *uobject;
1656 	void		    *wq_context;
1657 	void		    (*event_handler)(struct ib_event *, void *);
1658 	struct ib_pd	       *pd;
1659 	struct ib_cq	       *cq;
1660 	u32		wq_num;
1661 	enum ib_wq_state       state;
1662 	enum ib_wq_type	wq_type;
1663 	atomic_t		usecnt;
1664 };
1665 
1666 enum ib_wq_flags {
1667 	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
1668 	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
1669 	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
1670 	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1671 };
1672 
1673 struct ib_wq_init_attr {
1674 	void		       *wq_context;
1675 	enum ib_wq_type	wq_type;
1676 	u32		max_wr;
1677 	u32		max_sge;
1678 	struct	ib_cq	       *cq;
1679 	void		    (*event_handler)(struct ib_event *, void *);
1680 	u32		create_flags; /* Use enum ib_wq_flags */
1681 };
1682 
1683 enum ib_wq_attr_mask {
1684 	IB_WQ_STATE		= 1 << 0,
1685 	IB_WQ_CUR_STATE		= 1 << 1,
1686 	IB_WQ_FLAGS		= 1 << 2,
1687 };
1688 
1689 struct ib_wq_attr {
1690 	enum	ib_wq_state	wq_state;
1691 	enum	ib_wq_state	curr_wq_state;
1692 	u32			flags; /* Use enum ib_wq_flags */
1693 	u32			flags_mask; /* Use enum ib_wq_flags */
1694 };
1695 
1696 struct ib_rwq_ind_table {
1697 	struct ib_device	*device;
1698 	struct ib_uobject      *uobject;
1699 	atomic_t		usecnt;
1700 	u32		ind_tbl_num;
1701 	u32		log_ind_tbl_size;
1702 	struct ib_wq	**ind_tbl;
1703 };
1704 
1705 struct ib_rwq_ind_table_init_attr {
1706 	u32		log_ind_tbl_size;
1707 	/* Each entry is a pointer to Receive Work Queue */
1708 	struct ib_wq	**ind_tbl;
1709 };
1710 
1711 enum port_pkey_state {
1712 	IB_PORT_PKEY_NOT_VALID = 0,
1713 	IB_PORT_PKEY_VALID = 1,
1714 	IB_PORT_PKEY_LISTED = 2,
1715 };
1716 
1717 struct ib_qp_security;
1718 
1719 struct ib_port_pkey {
1720 	enum port_pkey_state	state;
1721 	u16			pkey_index;
1722 	u8			port_num;
1723 	struct list_head	qp_list;
1724 	struct list_head	to_error_list;
1725 	struct ib_qp_security  *sec;
1726 };
1727 
1728 struct ib_ports_pkeys {
1729 	struct ib_port_pkey	main;
1730 	struct ib_port_pkey	alt;
1731 };
1732 
1733 struct ib_qp_security {
1734 	struct ib_qp	       *qp;
1735 	struct ib_device       *dev;
1736 	/* Hold this mutex when changing port and pkey settings. */
1737 	struct mutex		mutex;
1738 	struct ib_ports_pkeys  *ports_pkeys;
1739 	/* A list of all open shared QP handles.  Required to enforce security
1740 	 * properly for all users of a shared QP.
1741 	 */
1742 	struct list_head        shared_qp_list;
1743 	void                   *security;
1744 	bool			destroying;
1745 	atomic_t		error_list_count;
1746 	struct completion	error_complete;
1747 	int			error_comps_pending;
1748 };
1749 
1750 /*
1751  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1752  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1753  */
1754 struct ib_qp {
1755 	struct ib_device       *device;
1756 	struct ib_pd	       *pd;
1757 	struct ib_cq	       *send_cq;
1758 	struct ib_cq	       *recv_cq;
1759 	spinlock_t		mr_lock;
1760 	int			mrs_used;
1761 	struct list_head	rdma_mrs;
1762 	struct list_head	sig_mrs;
1763 	struct ib_srq	       *srq;
1764 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1765 	struct list_head	xrcd_list;
1766 
1767 	/* count times opened, mcast attaches, flow attaches */
1768 	atomic_t		usecnt;
1769 	struct list_head	open_list;
1770 	struct ib_qp           *real_qp;
1771 	struct ib_uobject      *uobject;
1772 	void                  (*event_handler)(struct ib_event *, void *);
1773 	void		       *qp_context;
1774 	/* sgid_attrs associated with the AV's */
1775 	const struct ib_gid_attr *av_sgid_attr;
1776 	const struct ib_gid_attr *alt_path_sgid_attr;
1777 	u32			qp_num;
1778 	u32			max_write_sge;
1779 	u32			max_read_sge;
1780 	enum ib_qp_type		qp_type;
1781 	struct ib_rwq_ind_table *rwq_ind_tbl;
1782 	struct ib_qp_security  *qp_sec;
1783 	u8			port;
1784 
1785 	/*
1786 	 * Implementation details of the RDMA core, don't use in drivers:
1787 	 */
1788 	struct rdma_restrack_entry     res;
1789 };
1790 
1791 struct ib_dm {
1792 	struct ib_device  *device;
1793 	u32		   length;
1794 	u32		   flags;
1795 	struct ib_uobject *uobject;
1796 	atomic_t	   usecnt;
1797 };
1798 
1799 struct ib_mr {
1800 	struct ib_device  *device;
1801 	struct ib_pd	  *pd;
1802 	u32		   lkey;
1803 	u32		   rkey;
1804 	u64		   iova;
1805 	u64		   length;
1806 	unsigned int	   page_size;
1807 	bool		   need_inval;
1808 	union {
1809 		struct ib_uobject	*uobject;	/* user */
1810 		struct list_head	qp_entry;	/* FR */
1811 	};
1812 
1813 	struct ib_dm      *dm;
1814 
1815 	/*
1816 	 * Implementation details of the RDMA core, don't use in drivers:
1817 	 */
1818 	struct rdma_restrack_entry res;
1819 };
1820 
1821 struct ib_mw {
1822 	struct ib_device	*device;
1823 	struct ib_pd		*pd;
1824 	struct ib_uobject	*uobject;
1825 	u32			rkey;
1826 	enum ib_mw_type         type;
1827 };
1828 
1829 struct ib_fmr {
1830 	struct ib_device	*device;
1831 	struct ib_pd		*pd;
1832 	struct list_head	list;
1833 	u32			lkey;
1834 	u32			rkey;
1835 };
1836 
1837 /* Supported steering options */
1838 enum ib_flow_attr_type {
1839 	/* steering according to rule specifications */
1840 	IB_FLOW_ATTR_NORMAL		= 0x0,
1841 	/* default unicast and multicast rule -
1842 	 * receive all Eth traffic which isn't steered to any QP
1843 	 */
1844 	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1845 	/* default multicast rule -
1846 	 * receive all Eth multicast traffic which isn't steered to any QP
1847 	 */
1848 	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1849 	/* sniffer rule - receive all port traffic */
1850 	IB_FLOW_ATTR_SNIFFER		= 0x3
1851 };
1852 
1853 /* Supported steering header types */
1854 enum ib_flow_spec_type {
1855 	/* L2 headers*/
1856 	IB_FLOW_SPEC_ETH		= 0x20,
1857 	IB_FLOW_SPEC_IB			= 0x22,
1858 	/* L3 header*/
1859 	IB_FLOW_SPEC_IPV4		= 0x30,
1860 	IB_FLOW_SPEC_IPV6		= 0x31,
1861 	IB_FLOW_SPEC_ESP                = 0x34,
1862 	/* L4 headers*/
1863 	IB_FLOW_SPEC_TCP		= 0x40,
1864 	IB_FLOW_SPEC_UDP		= 0x41,
1865 	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
1866 	IB_FLOW_SPEC_GRE		= 0x51,
1867 	IB_FLOW_SPEC_MPLS		= 0x60,
1868 	IB_FLOW_SPEC_INNER		= 0x100,
1869 	/* Actions */
1870 	IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1871 	IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1872 	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
1873 	IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1874 };
1875 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
1876 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1877 
1878 /* Flow steering rule priority is set according to its domain.
1879  * Lower domain value means higher priority.
1880  */
1881 enum ib_flow_domain {
1882 	IB_FLOW_DOMAIN_USER,
1883 	IB_FLOW_DOMAIN_ETHTOOL,
1884 	IB_FLOW_DOMAIN_RFS,
1885 	IB_FLOW_DOMAIN_NIC,
1886 	IB_FLOW_DOMAIN_NUM /* Must be last */
1887 };
1888 
1889 enum ib_flow_flags {
1890 	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1891 	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1892 	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1893 };
1894 
1895 struct ib_flow_eth_filter {
1896 	u8	dst_mac[6];
1897 	u8	src_mac[6];
1898 	__be16	ether_type;
1899 	__be16	vlan_tag;
1900 	/* Must be last */
1901 	u8	real_sz[0];
1902 };
1903 
1904 struct ib_flow_spec_eth {
1905 	u32			  type;
1906 	u16			  size;
1907 	struct ib_flow_eth_filter val;
1908 	struct ib_flow_eth_filter mask;
1909 };
1910 
1911 struct ib_flow_ib_filter {
1912 	__be16 dlid;
1913 	__u8   sl;
1914 	/* Must be last */
1915 	u8	real_sz[0];
1916 };
1917 
1918 struct ib_flow_spec_ib {
1919 	u32			 type;
1920 	u16			 size;
1921 	struct ib_flow_ib_filter val;
1922 	struct ib_flow_ib_filter mask;
1923 };
1924 
1925 /* IPv4 header flags */
1926 enum ib_ipv4_flags {
1927 	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1928 	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1929 				    last have this flag set */
1930 };
1931 
1932 struct ib_flow_ipv4_filter {
1933 	__be32	src_ip;
1934 	__be32	dst_ip;
1935 	u8	proto;
1936 	u8	tos;
1937 	u8	ttl;
1938 	u8	flags;
1939 	/* Must be last */
1940 	u8	real_sz[0];
1941 };
1942 
1943 struct ib_flow_spec_ipv4 {
1944 	u32			   type;
1945 	u16			   size;
1946 	struct ib_flow_ipv4_filter val;
1947 	struct ib_flow_ipv4_filter mask;
1948 };
1949 
1950 struct ib_flow_ipv6_filter {
1951 	u8	src_ip[16];
1952 	u8	dst_ip[16];
1953 	__be32	flow_label;
1954 	u8	next_hdr;
1955 	u8	traffic_class;
1956 	u8	hop_limit;
1957 	/* Must be last */
1958 	u8	real_sz[0];
1959 };
1960 
1961 struct ib_flow_spec_ipv6 {
1962 	u32			   type;
1963 	u16			   size;
1964 	struct ib_flow_ipv6_filter val;
1965 	struct ib_flow_ipv6_filter mask;
1966 };
1967 
1968 struct ib_flow_tcp_udp_filter {
1969 	__be16	dst_port;
1970 	__be16	src_port;
1971 	/* Must be last */
1972 	u8	real_sz[0];
1973 };
1974 
1975 struct ib_flow_spec_tcp_udp {
1976 	u32			      type;
1977 	u16			      size;
1978 	struct ib_flow_tcp_udp_filter val;
1979 	struct ib_flow_tcp_udp_filter mask;
1980 };
1981 
1982 struct ib_flow_tunnel_filter {
1983 	__be32	tunnel_id;
1984 	u8	real_sz[0];
1985 };
1986 
1987 /* ib_flow_spec_tunnel describes the VXLAN tunnel;
1988  * the tunnel_id in val holds the VNI value.
1989  */
1990 struct ib_flow_spec_tunnel {
1991 	u32			      type;
1992 	u16			      size;
1993 	struct ib_flow_tunnel_filter  val;
1994 	struct ib_flow_tunnel_filter  mask;
1995 };
1996 
1997 struct ib_flow_esp_filter {
1998 	__be32	spi;
1999 	__be32  seq;
2000 	/* Must be last */
2001 	u8	real_sz[0];
2002 };
2003 
2004 struct ib_flow_spec_esp {
2005 	u32                           type;
2006 	u16			      size;
2007 	struct ib_flow_esp_filter     val;
2008 	struct ib_flow_esp_filter     mask;
2009 };
2010 
2011 struct ib_flow_gre_filter {
2012 	__be16 c_ks_res0_ver;
2013 	__be16 protocol;
2014 	__be32 key;
2015 	/* Must be last */
2016 	u8	real_sz[0];
2017 };
2018 
2019 struct ib_flow_spec_gre {
2020 	u32                           type;
2021 	u16			      size;
2022 	struct ib_flow_gre_filter     val;
2023 	struct ib_flow_gre_filter     mask;
2024 };
2025 
2026 struct ib_flow_mpls_filter {
2027 	__be32 tag;
2028 	/* Must be last */
2029 	u8	real_sz[0];
2030 };
2031 
2032 struct ib_flow_spec_mpls {
2033 	u32                           type;
2034 	u16			      size;
2035 	struct ib_flow_mpls_filter     val;
2036 	struct ib_flow_mpls_filter     mask;
2037 };
2038 
2039 struct ib_flow_spec_action_tag {
2040 	enum ib_flow_spec_type	      type;
2041 	u16			      size;
2042 	u32                           tag_id;
2043 };
2044 
2045 struct ib_flow_spec_action_drop {
2046 	enum ib_flow_spec_type	      type;
2047 	u16			      size;
2048 };
2049 
2050 struct ib_flow_spec_action_handle {
2051 	enum ib_flow_spec_type	      type;
2052 	u16			      size;
2053 	struct ib_flow_action	     *act;
2054 };
2055 
2056 enum ib_counters_description {
2057 	IB_COUNTER_PACKETS,
2058 	IB_COUNTER_BYTES,
2059 };
2060 
2061 struct ib_flow_spec_action_count {
2062 	enum ib_flow_spec_type type;
2063 	u16 size;
2064 	struct ib_counters *counters;
2065 };
2066 
2067 union ib_flow_spec {
2068 	struct {
2069 		u32			type;
2070 		u16			size;
2071 	};
2072 	struct ib_flow_spec_eth		eth;
2073 	struct ib_flow_spec_ib		ib;
2074 	struct ib_flow_spec_ipv4        ipv4;
2075 	struct ib_flow_spec_tcp_udp	tcp_udp;
2076 	struct ib_flow_spec_ipv6        ipv6;
2077 	struct ib_flow_spec_tunnel      tunnel;
2078 	struct ib_flow_spec_esp		esp;
2079 	struct ib_flow_spec_gre		gre;
2080 	struct ib_flow_spec_mpls	mpls;
2081 	struct ib_flow_spec_action_tag  flow_tag;
2082 	struct ib_flow_spec_action_drop drop;
2083 	struct ib_flow_spec_action_handle action;
2084 	struct ib_flow_spec_action_count flow_count;
2085 };
2086 
2087 struct ib_flow_attr {
2088 	enum ib_flow_attr_type type;
2089 	u16	     size;
2090 	u16	     priority;
2091 	u32	     flags;
2092 	u8	     num_of_specs;
2093 	u8	     port;
2094 	union ib_flow_spec flows[];
2095 };
2096 
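/*
 * Example (illustrative sketch, not part of this header): building a
 * single-spec flow attribute.  num_of_specs counts the entries in the
 * flexible flows[] array; size covers the attribute plus the specs that
 * follow it.  The helper name is hypothetical.
 */
static inline struct ib_flow_attr *example_build_eth_flow_attr(u8 port)
{
	struct ib_flow_attr *flow_attr;
	struct ib_flow_spec_eth *eth;

	flow_attr = kzalloc(sizeof(*flow_attr) + sizeof(union ib_flow_spec),
			    GFP_KERNEL);
	if (!flow_attr)
		return NULL;

	eth = &flow_attr->flows[0].eth;

	flow_attr->type = IB_FLOW_ATTR_NORMAL;
	flow_attr->size = sizeof(*flow_attr) + sizeof(*eth);
	flow_attr->num_of_specs = 1;
	flow_attr->port = port;

	eth->type = IB_FLOW_SPEC_ETH;
	eth->size = sizeof(*eth);
	/* match on an exact destination MAC: mask out all dst_mac bits */
	memset(eth->mask.dst_mac, 0xff, sizeof(eth->mask.dst_mac));

	return flow_attr;
}
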
2097 struct ib_flow {
2098 	struct ib_qp		*qp;
2099 	struct ib_device	*device;
2100 	struct ib_uobject	*uobject;
2101 };
2102 
2103 enum ib_flow_action_type {
2104 	IB_FLOW_ACTION_UNSPECIFIED,
2105 	IB_FLOW_ACTION_ESP = 1,
2106 };
2107 
2108 struct ib_flow_action_attrs_esp_keymats {
2109 	enum ib_uverbs_flow_action_esp_keymat			protocol;
2110 	union {
2111 		struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2112 	} keymat;
2113 };
2114 
2115 struct ib_flow_action_attrs_esp_replays {
2116 	enum ib_uverbs_flow_action_esp_replay			protocol;
2117 	union {
2118 		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
2119 	} replay;
2120 };
2121 
2122 enum ib_flow_action_attrs_esp_flags {
2123 	/* All user-space flags come first: use enum ib_uverbs_flow_action_esp_flags.
2124 	 * Sharing the same flag values between user space and the kernel spares
2125 	 * an unnecessary translation.
2126 	 */
2127 
2128 	/* Kernel flags */
2129 	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
2130 	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
2131 };
2132 
2133 struct ib_flow_spec_list {
2134 	struct ib_flow_spec_list	*next;
2135 	union ib_flow_spec		spec;
2136 };
2137 
2138 struct ib_flow_action_attrs_esp {
2139 	struct ib_flow_action_attrs_esp_keymats		*keymat;
2140 	struct ib_flow_action_attrs_esp_replays		*replay;
2141 	struct ib_flow_spec_list			*encap;
2142 	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2143 	 * Value of 0 is a valid value.
2144 	 */
2145 	u32						esn;
2146 	u32						spi;
2147 	u32						seq;
2148 	u32						tfc_pad;
2149 	/* Use enum ib_flow_action_attrs_esp_flags */
2150 	u64						flags;
2151 	u64						hard_limit_pkts;
2152 };
2153 
2154 struct ib_flow_action {
2155 	struct ib_device		*device;
2156 	struct ib_uobject		*uobject;
2157 	enum ib_flow_action_type	type;
2158 	atomic_t			usecnt;
2159 };
2160 
2161 struct ib_mad_hdr;
2162 struct ib_grh;
2163 
2164 enum ib_process_mad_flags {
2165 	IB_MAD_IGNORE_MKEY	= 1,
2166 	IB_MAD_IGNORE_BKEY	= 2,
2167 	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2168 };
2169 
2170 enum ib_mad_result {
2171 	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2172 	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2173 	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2174 	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2175 };
2176 
2177 struct ib_port_cache {
2178 	u64		      subnet_prefix;
2179 	struct ib_pkey_cache  *pkey;
2180 	struct ib_gid_table   *gid;
2181 	u8                     lmc;
2182 	enum ib_port_state     port_state;
2183 };
2184 
2185 struct ib_cache {
2186 	rwlock_t                lock;
2187 	struct ib_event_handler event_handler;
2188 	struct ib_port_cache   *ports;
2189 };
2190 
2191 struct iw_cm_verbs;
2192 
2193 struct ib_port_immutable {
2194 	int                           pkey_tbl_len;
2195 	int                           gid_tbl_len;
2196 	u32                           core_cap_flags;
2197 	u32                           max_mad_size;
2198 };
2199 
2200 /* rdma netdev type - specifies protocol type */
2201 enum rdma_netdev_t {
2202 	RDMA_NETDEV_OPA_VNIC,
2203 	RDMA_NETDEV_IPOIB,
2204 };
2205 
2206 /**
2207  * struct rdma_netdev - rdma netdev
2208  * For cases where netstack interfacing is required.
2209  */
2210 struct rdma_netdev {
2211 	void              *clnt_priv;
2212 	struct ib_device  *hca;
2213 	u8                 port_num;
2214 
2215 	/*
2216 	 * cleanup function must be specified.
2217 	 * FIXME: This is only used for OPA_VNIC and that usage should be
2218 	 * removed too.
2219 	 */
2220 	void (*free_rdma_netdev)(struct net_device *netdev);
2221 
2222 	/* control functions */
2223 	void (*set_id)(struct net_device *netdev, int id);
2224 	/* send packet */
2225 	int (*send)(struct net_device *dev, struct sk_buff *skb,
2226 		    struct ib_ah *address, u32 dqpn);
2227 	/* multicast */
2228 	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2229 			    union ib_gid *gid, u16 mlid,
2230 			    int set_qkey, u32 qkey);
2231 	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2232 			    union ib_gid *gid, u16 mlid);
2233 };
2234 
2235 struct ib_port_pkey_list {
2236 	/* Lock to hold while modifying the list. */
2237 	spinlock_t                    list_lock;
2238 	struct list_head              pkey_list;
2239 };
2240 
2241 struct ib_counters {
2242 	struct ib_device	*device;
2243 	struct ib_uobject	*uobject;
2244 	/* num of objects attached */
2245 	atomic_t	usecnt;
2246 };
2247 
2248 struct ib_counters_read_attr {
2249 	u64	*counters_buff;
2250 	u32	ncounters;
2251 	u32	flags; /* use enum ib_read_counters_flags */
2252 };
2253 
2254 struct uverbs_attr_bundle;
2255 
2256 struct ib_device {
2257 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
2258 	struct device                *dma_device;
2259 
2260 	char                          name[IB_DEVICE_NAME_MAX];
2261 
2262 	struct list_head              event_handler_list;
2263 	spinlock_t                    event_handler_lock;
2264 
2265 	spinlock_t                    client_data_lock;
2266 	struct list_head              core_list;
2267 	/* Access to the client_data_list is protected by the client_data_lock
2268 	 * spinlock and the lists_rwsem read-write semaphore */
2269 	struct list_head              client_data_list;
2270 
2271 	struct ib_cache               cache;
2272 	/**
2273 	 * port_immutable is indexed by port number
2274 	 */
2275 	struct ib_port_immutable     *port_immutable;
2276 
2277 	int			      num_comp_vectors;
2278 
2279 	struct ib_port_pkey_list     *port_pkey_list;
2280 
2281 	struct iw_cm_verbs	     *iwcm;
2282 
2283 	/**
2284 	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2285 	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
2286 	 *   core when the device is removed.  A lifespan of -1 in the return
2287 	 *   struct tells the core to set a default lifespan.
2288 	 */
2289 	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2290 						     u8 port_num);
2291 	/**
2292 	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2293 	 * @index - The index in the value array we wish to have updated, or
2294 	 *   num_counters if we want all stats updated
2295 	 * Return codes -
2296 	 *   < 0 - Error, no counters updated
2297 	 *   index - Updated the single counter pointed to by index
2298 	 *   num_counters - Updated all counters (will reset the timestamp
2299 	 *     and prevent further calls for lifespan milliseconds)
2300 	 * Drivers are allowed to update all counters in lieu of just the
2301 	 *   one given in index at their option
2302 	 */
2303 	int		           (*get_hw_stats)(struct ib_device *device,
2304 						   struct rdma_hw_stats *stats,
2305 						   u8 port, int index);
2306 	int		           (*query_device)(struct ib_device *device,
2307 						   struct ib_device_attr *device_attr,
2308 						   struct ib_udata *udata);
2309 	int		           (*query_port)(struct ib_device *device,
2310 						 u8 port_num,
2311 						 struct ib_port_attr *port_attr);
2312 	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
2313 						     u8 port_num);
2314 	/* When calling get_netdev, the HW vendor's driver should return the
2315 	 * net device of device @device at port @port_num or NULL if such
2316 	 * a net device doesn't exist. The vendor driver should call dev_hold
2317 	 * on this net device. The HW vendor's device driver must guarantee
2318 	 * that this function returns NULL before the net device has finished
2319 	 * NETDEV_UNREGISTER state.
2320 	 */
2321 	struct net_device	  *(*get_netdev)(struct ib_device *device,
2322 						 u8 port_num);
2323 	/* query_gid should return the GID value for @device when the link layer
2324 	 * of @port_num is either IB or iWARP. It is a no-op if the @port_num
2325 	 * port uses the RoCE link layer.
2326 	 */
2327 	int		           (*query_gid)(struct ib_device *device,
2328 						u8 port_num, int index,
2329 						union ib_gid *gid);
2330 	/* When calling add_gid, the HW vendor's driver should add the gid
2331 	 * of device of port at gid index available at @attr. Meta-info of
2332 	 * that gid (for example, the network device related to this gid) is
2333 	 * available at @attr. @context allows the HW vendor driver to store
2334 	 * extra information together with a GID entry. The HW vendor driver may
2335 	 * allocate memory to contain this information and store it in @context
2336 	 * when a new GID entry is written to. Params are consistent until the
2337 	 * next call of add_gid or delete_gid. The function should return 0 on
2338 	 * success or error otherwise. The function could be called
2339 	 * concurrently for different ports. This function is only called when
2340 	 * roce_gid_table is used.
2341 	 */
2342 	int		           (*add_gid)(const struct ib_gid_attr *attr,
2343 					      void **context);
2344 	/* When calling del_gid, the HW vendor's driver should delete the
2345 	 * gid of device @device at gid index gid_index of port port_num
2346 	 * available in @attr.
2347 	 * Upon the deletion of a GID entry, the HW vendor must free any
2348 	 * allocated memory. The caller will clear @context afterwards.
2349 	 * This function is only called when roce_gid_table is used.
2350 	 */
2351 	int		           (*del_gid)(const struct ib_gid_attr *attr,
2352 					      void **context);
2353 	int		           (*query_pkey)(struct ib_device *device,
2354 						 u8 port_num, u16 index, u16 *pkey);
2355 	int		           (*modify_device)(struct ib_device *device,
2356 						    int device_modify_mask,
2357 						    struct ib_device_modify *device_modify);
2358 	int		           (*modify_port)(struct ib_device *device,
2359 						  u8 port_num, int port_modify_mask,
2360 						  struct ib_port_modify *port_modify);
2361 	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2362 						     struct ib_udata *udata);
2363 	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2364 	int                        (*mmap)(struct ib_ucontext *context,
2365 					   struct vm_area_struct *vma);
2366 	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2367 					       struct ib_ucontext *context,
2368 					       struct ib_udata *udata);
2369 	int                        (*dealloc_pd)(struct ib_pd *pd);
2370 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2371 						struct rdma_ah_attr *ah_attr,
2372 						struct ib_udata *udata);
2373 	int                        (*modify_ah)(struct ib_ah *ah,
2374 						struct rdma_ah_attr *ah_attr);
2375 	int                        (*query_ah)(struct ib_ah *ah,
2376 					       struct rdma_ah_attr *ah_attr);
2377 	int                        (*destroy_ah)(struct ib_ah *ah);
2378 	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2379 						 struct ib_srq_init_attr *srq_init_attr,
2380 						 struct ib_udata *udata);
2381 	int                        (*modify_srq)(struct ib_srq *srq,
2382 						 struct ib_srq_attr *srq_attr,
2383 						 enum ib_srq_attr_mask srq_attr_mask,
2384 						 struct ib_udata *udata);
2385 	int                        (*query_srq)(struct ib_srq *srq,
2386 						struct ib_srq_attr *srq_attr);
2387 	int                        (*destroy_srq)(struct ib_srq *srq);
2388 	int                        (*post_srq_recv)(struct ib_srq *srq,
2389 						    const struct ib_recv_wr *recv_wr,
2390 						    const struct ib_recv_wr **bad_recv_wr);
2391 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2392 						struct ib_qp_init_attr *qp_init_attr,
2393 						struct ib_udata *udata);
2394 	int                        (*modify_qp)(struct ib_qp *qp,
2395 						struct ib_qp_attr *qp_attr,
2396 						int qp_attr_mask,
2397 						struct ib_udata *udata);
2398 	int                        (*query_qp)(struct ib_qp *qp,
2399 					       struct ib_qp_attr *qp_attr,
2400 					       int qp_attr_mask,
2401 					       struct ib_qp_init_attr *qp_init_attr);
2402 	int                        (*destroy_qp)(struct ib_qp *qp);
2403 	int                        (*post_send)(struct ib_qp *qp,
2404 						const struct ib_send_wr *send_wr,
2405 						const struct ib_send_wr **bad_send_wr);
2406 	int                        (*post_recv)(struct ib_qp *qp,
2407 						const struct ib_recv_wr *recv_wr,
2408 						const struct ib_recv_wr **bad_recv_wr);
2409 	struct ib_cq *             (*create_cq)(struct ib_device *device,
2410 						const struct ib_cq_init_attr *attr,
2411 						struct ib_ucontext *context,
2412 						struct ib_udata *udata);
2413 	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2414 						u16 cq_period);
2415 	int                        (*destroy_cq)(struct ib_cq *cq);
2416 	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2417 						struct ib_udata *udata);
2418 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2419 					      struct ib_wc *wc);
2420 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2421 	int                        (*req_notify_cq)(struct ib_cq *cq,
2422 						    enum ib_cq_notify_flags flags);
2423 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
2424 						      int wc_cnt);
2425 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2426 						 int mr_access_flags);
2427 	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2428 						  u64 start, u64 length,
2429 						  u64 virt_addr,
2430 						  int mr_access_flags,
2431 						  struct ib_udata *udata);
2432 	int			   (*rereg_user_mr)(struct ib_mr *mr,
2433 						    int flags,
2434 						    u64 start, u64 length,
2435 						    u64 virt_addr,
2436 						    int mr_access_flags,
2437 						    struct ib_pd *pd,
2438 						    struct ib_udata *udata);
2439 	int                        (*dereg_mr)(struct ib_mr *mr);
2440 	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
2441 					       enum ib_mr_type mr_type,
2442 					       u32 max_num_sg);
2443 	int                        (*map_mr_sg)(struct ib_mr *mr,
2444 						struct scatterlist *sg,
2445 						int sg_nents,
2446 						unsigned int *sg_offset);
2447 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2448 					       enum ib_mw_type type,
2449 					       struct ib_udata *udata);
2450 	int                        (*dealloc_mw)(struct ib_mw *mw);
2451 	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
2452 						int mr_access_flags,
2453 						struct ib_fmr_attr *fmr_attr);
2454 	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
2455 						   u64 *page_list, int list_len,
2456 						   u64 iova);
2457 	int		           (*unmap_fmr)(struct list_head *fmr_list);
2458 	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
2459 	int                        (*attach_mcast)(struct ib_qp *qp,
2460 						   union ib_gid *gid,
2461 						   u16 lid);
2462 	int                        (*detach_mcast)(struct ib_qp *qp,
2463 						   union ib_gid *gid,
2464 						   u16 lid);
2465 	int                        (*process_mad)(struct ib_device *device,
2466 						  int process_mad_flags,
2467 						  u8 port_num,
2468 						  const struct ib_wc *in_wc,
2469 						  const struct ib_grh *in_grh,
2470 						  const struct ib_mad_hdr *in_mad,
2471 						  size_t in_mad_size,
2472 						  struct ib_mad_hdr *out_mad,
2473 						  size_t *out_mad_size,
2474 						  u16 *out_mad_pkey_index);
2475 	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
2476 						 struct ib_ucontext *ucontext,
2477 						 struct ib_udata *udata);
2478 	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2479 	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
2480 						  struct ib_flow_attr
2481 						  *flow_attr,
2482 						  int domain,
2483 						  struct ib_udata *udata);
2484 	int			   (*destroy_flow)(struct ib_flow *flow_id);
2485 	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2486 						      struct ib_mr_status *mr_status);
2487 	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2488 	void			   (*drain_rq)(struct ib_qp *qp);
2489 	void			   (*drain_sq)(struct ib_qp *qp);
2490 	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2491 							int state);
2492 	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2493 						   struct ifla_vf_info *ivf);
2494 	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2495 						   struct ifla_vf_stats *stats);
2496 	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2497 						  int type);
2498 	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
2499 						struct ib_wq_init_attr *init_attr,
2500 						struct ib_udata *udata);
2501 	int			   (*destroy_wq)(struct ib_wq *wq);
2502 	int			   (*modify_wq)(struct ib_wq *wq,
2503 						struct ib_wq_attr *attr,
2504 						u32 wq_attr_mask,
2505 						struct ib_udata *udata);
2506 	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2507 							   struct ib_rwq_ind_table_init_attr *init_attr,
2508 							   struct ib_udata *udata);
2509 	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2510 	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
2511 							     const struct ib_flow_action_attrs_esp *attr,
2512 							     struct uverbs_attr_bundle *attrs);
2513 	int			   (*destroy_flow_action)(struct ib_flow_action *action);
2514 	int			   (*modify_flow_action_esp)(struct ib_flow_action *action,
2515 							     const struct ib_flow_action_attrs_esp *attr,
2516 							     struct uverbs_attr_bundle *attrs);
2517 	struct ib_dm *             (*alloc_dm)(struct ib_device *device,
2518 					       struct ib_ucontext *context,
2519 					       struct ib_dm_alloc_attr *attr,
2520 					       struct uverbs_attr_bundle *attrs);
2521 	int                        (*dealloc_dm)(struct ib_dm *dm);
2522 	struct ib_mr *             (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2523 						struct ib_dm_mr_attr *attr,
2524 						struct uverbs_attr_bundle *attrs);
2525 	struct ib_counters *	(*create_counters)(struct ib_device *device,
2526 						   struct uverbs_attr_bundle *attrs);
2527 	int	(*destroy_counters)(struct ib_counters	*counters);
2528 	int	(*read_counters)(struct ib_counters *counters,
2529 				 struct ib_counters_read_attr *counters_read_attr,
2530 				 struct uverbs_attr_bundle *attrs);
2531 
2532 	/**
2533 	 * rdma netdev operation
2534 	 *
2535 	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2536 	 * doesn't support the specified rdma netdev type.
2537 	 */
2538 	struct net_device *(*alloc_rdma_netdev)(
2539 					struct ib_device *device,
2540 					u8 port_num,
2541 					enum rdma_netdev_t type,
2542 					const char *name,
2543 					unsigned char name_assign_type,
2544 					void (*setup)(struct net_device *));
2545 
2546 	struct module               *owner;
2547 	struct device                dev;
2548 	struct kobject               *ports_parent;
2549 	struct list_head             port_list;
2550 
2551 	enum {
2552 		IB_DEV_UNINITIALIZED,
2553 		IB_DEV_REGISTERED,
2554 		IB_DEV_UNREGISTERED
2555 	}                            reg_state;
2556 
2557 	int			     uverbs_abi_ver;
2558 	u64			     uverbs_cmd_mask;
2559 	u64			     uverbs_ex_cmd_mask;
2560 
2561 	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2562 	__be64			     node_guid;
2563 	u32			     local_dma_lkey;
2564 	u16                          is_switch:1;
2565 	u8                           node_type;
2566 	u8                           phys_port_cnt;
2567 	struct ib_device_attr        attrs;
2568 	struct attribute_group	     *hw_stats_ag;
2569 	struct rdma_hw_stats         *hw_stats;
2570 
2571 #ifdef CONFIG_CGROUP_RDMA
2572 	struct rdmacg_device         cg_device;
2573 #endif
2574 
2575 	u32                          index;
2576 	/*
2577 	 * Implementation details of the RDMA core, don't use in drivers
2578 	 */
2579 	struct rdma_restrack_root     res;
2580 
2581 	/**
2582 	 * The following mandatory functions are used only at device
2583 	 * registration.  Keep functions such as these at the end of this
2584 	 * structure to avoid cache line misses when accessing struct ib_device
2585 	 * in fast paths.
2586 	 */
2587 	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2588 	void (*get_dev_fw_str)(struct ib_device *, char *str);
2589 	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2590 						     int comp_vector);
2591 
2592 	const struct uverbs_object_tree_def *const *driver_specs;
2593 	enum rdma_driver_id		driver_id;
2594 };
2595 
2596 struct ib_client {
2597 	char  *name;
2598 	void (*add)   (struct ib_device *);
2599 	void (*remove)(struct ib_device *, void *client_data);
2600 
2601 	/* Returns the net_dev belonging to this ib_client and matching the
2602 	 * given parameters.
2603 	 * @dev:	 An RDMA device that the net_dev uses for communication.
2604 	 * @port:	 A physical port number on the RDMA device.
2605 	 * @pkey:	 P_Key that the net_dev uses if applicable.
2606 	 * @gid:	 A GID that the net_dev uses to communicate.
2607 	 * @addr:	 An IP address the net_dev is configured with.
2608 	 * @client_data: The device's client data set by ib_set_client_data().
2609 	 *
2610 	 * An ib_client that implements a net_dev on top of RDMA devices
2611 	 * (such as IP over IB) should implement this callback, allowing the
2612 	 * rdma_cm module to find the right net_dev for a given request.
2613 	 *
2614 	 * The caller is responsible for calling dev_put on the returned
2615 	 * netdev. */
2616 	struct net_device *(*get_net_dev_by_params)(
2617 			struct ib_device *dev,
2618 			u8 port,
2619 			u16 pkey,
2620 			const union ib_gid *gid,
2621 			const struct sockaddr *addr,
2622 			void *client_data);
2623 	struct list_head list;
2624 };
2625 
2626 struct ib_device *ib_alloc_device(size_t size);
2627 void ib_dealloc_device(struct ib_device *device);
2628 
2629 void ib_get_device_fw_str(struct ib_device *device, char *str);
2630 
2631 int ib_register_device(struct ib_device *device,
2632 		       int (*port_callback)(struct ib_device *,
2633 					    u8, struct kobject *));
2634 void ib_unregister_device(struct ib_device *device);
2635 
2636 int ib_register_client   (struct ib_client *client);
2637 void ib_unregister_client(struct ib_client *client);
2638 
2639 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2640 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2641 			 void *data);
2642 
2643 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2644 {
2645 	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2646 }
2647 
2648 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2649 {
2650 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2651 }
2652 
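/*
 * Example (illustrative sketch, not part of this header): a driver verb
 * pulling its command from user space and pushing a response back.  The
 * example_create_cmd/example_create_resp layouts and the helper name are
 * hypothetical.
 */
struct example_create_cmd {
	__u32 comp_mask;
	__u32 reserved;
};

struct example_create_resp {
	__u32 handle;
	__u32 reserved;
};

static inline int example_handle_udata(struct ib_udata *udata, u32 handle)
{
	struct example_create_cmd cmd;
	struct example_create_resp resp = { .handle = handle };
	int ret;

	ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.reserved)	/* reject input the driver does not understand */
		return -EINVAL;

	return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
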
2653 static inline bool ib_is_buffer_cleared(const void __user *p,
2654 					size_t len)
2655 {
2656 	bool ret;
2657 	u8 *buf;
2658 
2659 	if (len > USHRT_MAX)
2660 		return false;
2661 
2662 	buf = memdup_user(p, len);
2663 	if (IS_ERR(buf))
2664 		return false;
2665 
2666 	ret = !memchr_inv(buf, 0, len);
2667 	kfree(buf);
2668 	return ret;
2669 }
2670 
2671 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2672 				       size_t offset,
2673 				       size_t len)
2674 {
2675 	return ib_is_buffer_cleared(udata->inbuf + offset, len);
2676 }
2677 
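/*
 * Example (illustrative sketch, not part of this header): rejecting a
 * request whose trailing input bytes (unknown to this kernel) are not
 * zeroed, a common way to stay compatible with newer user-space command
 * layouts.  example_cmd_size and the helper name are hypothetical.
 */
static inline int example_check_trailing_input(struct ib_udata *udata,
					       size_t example_cmd_size)
{
	if (udata->inlen > example_cmd_size &&
	    !ib_is_udata_cleared(udata, example_cmd_size,
				 udata->inlen - example_cmd_size))
		return -EOPNOTSUPP;

	return 0;
}
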
2678 /**
2679  * ib_is_destroy_retryable - Check whether the uobject destruction
2680  * is retryable.
2681  * @ret: The initial destruction return code
2682  * @why: remove reason
2683  * @uobj: The uobject that is destroyed
2684  *
2685  * This function is a helper function that IB layer and low-level drivers
2686  * can use to consider whether the destruction of the given uobject is
2687  * retry-able.
2688  * It checks the original return code, if it wasn't success the destruction
2689  * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2690  * the remove reason. (i.e. why).
2691  * Must be called with the object locked for destroy.
2692  */
2693 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2694 					   struct ib_uobject *uobj)
2695 {
2696 	return ret && (why == RDMA_REMOVE_DESTROY ||
2697 		       uobj->context->cleanup_retryable);
2698 }
2699 
2700 /**
2701  * ib_destroy_usecnt - Called during destruction to check the usecnt
2702  * @usecnt: The usecnt atomic
2703  * @why: remove reason
2704  * @uobj: The uobject that is destroyed
2705  *
2706  * Non-zero usecnts will block destruction unless destruction was triggered by
2707  * a ucontext cleanup.
2708  */
2709 static inline int ib_destroy_usecnt(atomic_t *usecnt,
2710 				    enum rdma_remove_reason why,
2711 				    struct ib_uobject *uobj)
2712 {
2713 	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2714 		return -EBUSY;
2715 	return 0;
2716 }
2717 
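/*
 * Example (illustrative sketch, not part of this header): a driver destroy
 * path using ib_destroy_usecnt() so that a still-referenced object only
 * blocks an explicit destroy, not a ucontext teardown.  The example_obj
 * type and helper name are hypothetical.
 */
struct example_obj {
	struct ib_uobject	*uobject;
	atomic_t		usecnt;
};

static inline int example_destroy_obj(struct example_obj *obj,
				      enum rdma_remove_reason why)
{
	int ret;

	ret = ib_destroy_usecnt(&obj->usecnt, why, obj->uobject);
	if (ret)
		return ret;	/* -EBUSY: object still in use */

	/* ... release the driver resources backing the object here ... */
	return 0;
}
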
2718 /**
2719  * ib_modify_qp_is_ok - Check that the supplied attribute mask
2720  * contains all required attributes and no attributes not allowed for
2721  * the given QP state transition.
2722  * @cur_state: Current QP state
2723  * @next_state: Next QP state
2724  * @type: QP type
2725  * @mask: Mask of supplied QP attributes
2726  * @ll : link layer of port
2727  *
2728  * This function is a helper function that a low-level driver's
2729  * modify_qp method can use to validate the consumer's input.  It
2730  * checks that cur_state and next_state are valid QP states, that a
2731  * transition from cur_state to next_state is allowed by the IB spec,
2732  * and that the attribute mask supplied is allowed for the transition.
2733  */
2734 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2735 			enum ib_qp_type type, enum ib_qp_attr_mask mask,
2736 			enum rdma_link_layer ll);
2737 
2738 void ib_register_event_handler(struct ib_event_handler *event_handler);
2739 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2740 void ib_dispatch_event(struct ib_event *event);
2741 
2742 int ib_query_port(struct ib_device *device,
2743 		  u8 port_num, struct ib_port_attr *port_attr);
2744 
2745 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2746 					       u8 port_num);
2747 
2748 /**
2749  * rdma_cap_ib_switch - Check if the device is an IB switch
2750  * @device: Device to check
2751  *
2752  * The device driver is responsible for setting the is_switch bit in the
2753  * ib_device structure at init time.
2754  *
2755  * Return: true if the device is an IB switch.
2756  */
2757 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2758 {
2759 	return device->is_switch;
2760 }
2761 
2762 /**
2763  * rdma_start_port - Return the first valid port number for the device
2764  * specified
2765  *
2766  * @device: Device to be checked
2767  *
2768  * Return start port number
2769  */
2770 static inline u8 rdma_start_port(const struct ib_device *device)
2771 {
2772 	return rdma_cap_ib_switch(device) ? 0 : 1;
2773 }
2774 
2775 /**
2776  * rdma_end_port - Return the last valid port number for the device
2777  * specified
2778  *
2779  * @device: Device to be checked
2780  *
2781  * Return last port number
2782  */
2783 static inline u8 rdma_end_port(const struct ib_device *device)
2784 {
2785 	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2786 }
2787 
2788 static inline int rdma_is_port_valid(const struct ib_device *device,
2789 				     unsigned int port)
2790 {
2791 	return (port >= rdma_start_port(device) &&
2792 		port <= rdma_end_port(device));
2793 }
2794 
2795 static inline bool rdma_is_grh_required(const struct ib_device *device,
2796 					u8 port_num)
2797 {
2798 	return device->port_immutable[port_num].core_cap_flags &
2799 		RDMA_CORE_PORT_IB_GRH_REQUIRED;
2800 }
2801 
2802 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2803 {
2804 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2805 }
2806 
2807 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2808 {
2809 	return device->port_immutable[port_num].core_cap_flags &
2810 		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2811 }
2812 
2813 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2814 {
2815 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2816 }
2817 
2818 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2819 {
2820 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2821 }
2822 
2823 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2824 {
2825 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2826 }
2827 
2828 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2829 {
2830 	return rdma_protocol_ib(device, port_num) ||
2831 		rdma_protocol_roce(device, port_num);
2832 }
2833 
2834 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2835 {
2836 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2837 }
2838 
2839 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2840 {
2841 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2842 }
2843 
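/*
 * Example (illustrative sketch, not part of this header): walking every
 * valid port of a device.  Switches expose port 0 while HCAs expose ports
 * 1..phys_port_cnt, which is exactly what rdma_start_port() and
 * rdma_end_port() encapsulate.  The helper name is hypothetical.
 */
static inline unsigned int example_count_roce_ports(struct ib_device *device)
{
	unsigned int port, n = 0;

	for (port = rdma_start_port(device); port <= rdma_end_port(device);
	     port++)
		if (rdma_protocol_roce(device, port))
			n++;

	return n;
}
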
2844 /**
2845  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2846  * Management Datagrams.
2847  * @device: Device to check
2848  * @port_num: Port number to check
2849  *
2850  * Management Datagrams (MAD) are a required part of the InfiniBand
2851  * specification and are supported on all InfiniBand devices.  A slightly
2852  * extended version is also supported on OPA interfaces.
2853  *
2854  * Return: true if the port supports sending/receiving of MAD packets.
2855  */
2856 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2857 {
2858 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2859 }
2860 
2861 /**
2862  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2863  * Management Datagrams.
2864  * @device: Device to check
2865  * @port_num: Port number to check
2866  *
2867  * Intel OmniPath devices extend and/or replace the InfiniBand Management
2868  * datagrams with their own versions.  These OPA MADs share many but not all of
2869  * the characteristics of InfiniBand MADs.
2870  *
2871  * OPA MADs differ in the following ways:
2872  *
2873  *    1) MADs are variable size up to 2K
2874  *       IBTA defined MADs remain fixed at 256 bytes
2875  *    2) OPA SMPs must carry valid PKeys
2876  *    3) OPA SMP packets are a different format
2877  *
2878  * Return: true if the port supports OPA MAD packet formats.
2879  */
2880 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2881 {
2882 	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2883 		== RDMA_CORE_CAP_OPA_MAD;
2884 }
2885 
2886 /**
2887  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2888  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2889  * @device: Device to check
2890  * @port_num: Port number to check
2891  *
2892  * Each InfiniBand node is required to provide a Subnet Management Agent
2893  * that the subnet manager can access.  Prior to the fabric being fully
2894  * configured by the subnet manager, the SMA is accessed via a well known
2895  * interface called the Subnet Management Interface (SMI).  This interface
2896  * uses directed route packets to communicate with the SM to get around the
2897  * chicken and egg problem of the SM needing to know what's on the fabric
2898  * in order to configure the fabric, and needing to configure the fabric in
2899  * order to send packets to the devices on the fabric.  These directed
2900  * route packets do not need the fabric fully configured in order to reach
2901  * their destination.  The SMI is the only method allowed to send
2902  * directed route packets on an InfiniBand fabric.
2903  *
2904  * Return: true if the port provides an SMI.
2905  */
2906 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2907 {
2908 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2909 }
2910 
2911 /**
2912  * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2913  * Communication Manager.
2914  * @device: Device to check
2915  * @port_num: Port number to check
2916  *
2917  * The InfiniBand Communication Manager is one of many pre-defined General
2918  * Service Agents (GSA) that are accessed via the General Service
2919  * Interface (GSI).  Its role is to facilitate establishment of connections
2920  * between nodes as well as other management related tasks for established
2921  * connections.
2922  *
2923  * Return: true if the port supports an IB CM (this does not guarantee that
2924  * a CM is actually running however).
2925  */
2926 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2927 {
2928 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2929 }
2930 
2931 /**
2932  * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2933  * Communication Manager.
2934  * @device: Device to check
2935  * @port_num: Port number to check
2936  *
2937  * Similar to above, but specific to iWARP connections, which have a different
2938  * management protocol than InfiniBand.
2939  *
2940  * Return: true if the port supports an iWARP CM (this does not guarantee that
2941  * a CM is actually running however).
2942  */
2943 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2944 {
2945 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2946 }
2947 
2948 /**
2949  * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2950  * Subnet Administration.
2951  * @device: Device to check
2952  * @port_num: Port number to check
2953  *
2954  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2955  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2956  * fabrics, devices should resolve routes to other hosts by contacting the
2957  * SA to query the proper route.
2958  *
2959  * Return: true if the port should act as a client to the fabric Subnet
2960  * Administration interface.  This does not imply that the SA service is
2961  * running locally.
2962  */
2963 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2964 {
2965 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2966 }
2967 
2968 /**
2969  * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2970  * Multicast.
2971  * @device: Device to check
2972  * @port_num: Port number to check
2973  *
2974  * InfiniBand multicast registration is more complex than normal IPv4 or
2975  * IPv6 multicast registration.  Each Host Channel Adapter must register
2976  * with the Subnet Manager when it wishes to join a multicast group.  It
2977  * should do so only once regardless of how many queue pairs it subscribes
2978  * to this group.  And it should leave the group only after all queue pairs
2979  * attached to the group have been detached.
2980  *
2981  * Return: true if the port must undertake the additional administrative
2982  * overhead of registering/unregistering with the SM and tracking of the
2983  * total number of queue pairs attached to the multicast group.
2984  */
2985 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2986 {
2987 	return rdma_cap_ib_sa(device, port_num);
2988 }
2989 
2990 /**
2991  * rdma_cap_af_ib - Check if the port of a device supports native
2992  * InfiniBand addressing.
2993  * @device: Device to check
2994  * @port_num: Port number to check
2995  *
2996  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2997  * GID.  RoCE uses a different mechanism, but still generates a GID via
2998  * a prescribed mechanism and port specific data.
2999  *
3000  * Return: true if the port uses a GID address to identify devices on the
3001  * network.
3002  */
3003 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3004 {
3005 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
3006 }
3007 
3008 /**
3009  * rdma_cap_eth_ah - Check if the port of a device supports Ethernet
3010  * Address Handles.
3011  * @device: Device to check
3012  * @port_num: Port number to check
3013  *
3014  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3015  * to fabricate GIDs over Ethernet/IP specific addresses native to the
3016  * port.  Normally, packet headers are generated by the sending host
3017  * adapter, but when sending connectionless datagrams, we must manually
3018  * inject the proper headers for the fabric we are communicating over.
3019  *
3020  * Return: true if we are running as a RoCE port and must force the
3021  * addition of a Global Route Header built from our Ethernet Address
3022  * Handle into our header list for connectionless packets.
3023  */
3024 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3025 {
3026 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
3027 }
3028 
3029 /**
3030  * rdma_cap_opa_ah - Check if the port of device supports
3031  * OPA Address handles
3032  * @device: Device to check
3033  * @port_num: Port number to check
3034  *
3035  * Return: true if we are running on an OPA device which supports
3036  * the extended OPA addressing.
3037  */
3038 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3039 {
3040 	return (device->port_immutable[port_num].core_cap_flags &
3041 		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3042 }
3043 
3044 /**
3045  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3046  *
3047  * @device: Device
3048  * @port_num: Port number
3049  *
3050  * This MAD size includes the MAD headers and MAD payload.  No other headers
3051  * are included.
3052  *
3053  * Return the max MAD size required by the Port.  Will return 0 if the port
3054  * does not support MADs
3055  */
3056 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3057 {
3058 	return device->port_immutable[port_num].max_mad_size;
3059 }
3060 
3061 /**
3062  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3063  * @device: Device to check
3064  * @port_num: Port number to check
3065  *
3066  * RoCE GID table mechanism manages the various GIDs for a device.
3067  *
3068  * NOTE: if allocating the port's GID table has failed, this call will still
3069  * return true, but any RoCE GID table API will fail.
3070  *
3071  * Return: true if the port uses RoCE GID table mechanism in order to manage
3072  * its GIDs.
3073  */
3074 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3075 					   u8 port_num)
3076 {
3077 	return rdma_protocol_roce(device, port_num) &&
3078 		device->add_gid && device->del_gid;
3079 }
3080 
3081 /*
3082  * Check if the device supports READ W/ INVALIDATE.
3083  */
3084 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3085 {
3086 	/*
3087 	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3088 	 * has support for it yet.
3089 	 */
3090 	return rdma_protocol_iwarp(dev, port_num);
3091 }
3092 
3093 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3094 			 int state);
3095 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3096 		     struct ifla_vf_info *info);
3097 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3098 		    struct ifla_vf_stats *stats);
3099 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3100 		   int type);
3101 
3102 int ib_query_pkey(struct ib_device *device,
3103 		  u8 port_num, u16 index, u16 *pkey);
3104 
3105 int ib_modify_device(struct ib_device *device,
3106 		     int device_modify_mask,
3107 		     struct ib_device_modify *device_modify);
3108 
3109 int ib_modify_port(struct ib_device *device,
3110 		   u8 port_num, int port_modify_mask,
3111 		   struct ib_port_modify *port_modify);
3112 
3113 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3114 		u8 *port_num, u16 *index);
3115 
3116 int ib_find_pkey(struct ib_device *device,
3117 		 u8 port_num, u16 pkey, u16 *index);
3118 
3119 enum ib_pd_flags {
3120 	/*
3121 	 * Create a memory registration for all memory in the system and place
3122 	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3123 	 * ULPs to avoid the overhead of dynamic MRs.
3124 	 *
3125 	 * This flag is generally considered unsafe and must only be used in
3126 	 * extremly trusted environments.  Every use of it will log a warning
3127 	 * in the kernel log.
3128 	 */
3129 	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
3130 };
3131 
3132 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3133 		const char *caller);
3134 #define ib_alloc_pd(device, flags) \
3135 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3136 void ib_dealloc_pd(struct ib_pd *pd);
3137 
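/*
 * Example (illustrative sketch, not part of this header): a ULP taking a
 * protection domain for the lifetime of its connection state and releasing
 * it on teardown.  The example_conn structure and helper names are
 * hypothetical.
 */
struct example_conn {
	struct ib_pd *pd;
};

static inline int example_conn_init(struct example_conn *conn,
				    struct ib_device *device)
{
	conn->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(conn->pd))
		return PTR_ERR(conn->pd);

	return 0;
}

static inline void example_conn_fini(struct example_conn *conn)
{
	ib_dealloc_pd(conn->pd);
}
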
3138 /**
3139  * rdma_create_ah - Creates an address handle for the given address vector.
3140  * @pd: The protection domain associated with the address handle.
3141  * @ah_attr: The attributes of the address vector.
3142  *
3143  * The address handle is used to reference a local or global destination
3144  * in all UD QP post sends.
3145  */
3146 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
3147 
3148 /**
3149  * rdma_create_user_ah - Creates an address handle for the given address vector.
3150  * It resolves the destination MAC address for AH attributes of RoCE type.
3151  * @pd: The protection domain associated with the address handle.
3152  * @ah_attr: The attributes of the address vector.
3153  * @udata: pointer to the user's input/output buffer information needed by
3154  *         the provider driver.
3155  *
3156  * Returns a valid address handle on success or an ERR_PTR on failure.
3157  * The address handle is used to reference a local or global destination
3158  * in all UD QP post sends.
3159  */
3160 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3161 				  struct rdma_ah_attr *ah_attr,
3162 				  struct ib_udata *udata);
3163 /**
3164  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3165  *   work completion.
3166  * @hdr: the L3 header to parse
3167  * @net_type: type of header to parse
3168  * @sgid: place to store source gid
3169  * @dgid: place to store destination gid
3170  */
3171 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3172 			      enum rdma_network_type net_type,
3173 			      union ib_gid *sgid, union ib_gid *dgid);
3174 
3175 /**
3176  * ib_get_rdma_header_version - Get the header version
3177  * @hdr: the L3 header to parse
3178  */
3179 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3180 
3181 /**
3182  * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3183  *   work completion.
3184  * @device: Device on which the received message arrived.
3185  * @port_num: Port on which the received message arrived.
3186  * @wc: Work completion associated with the received message.
3187  * @grh: References the received global route header.  This parameter is
3188  *   ignored unless the work completion indicates that the GRH is valid.
3189  * @ah_attr: Returned attributes that can be used when creating an address
3190  *   handle for replying to the message.
3191  * When ib_init_ah_attr_from_wc() returns success,
3192  * (a) for IB link layer it optionally contains a reference to SGID attribute
3193  * when GRH is present for IB link layer.
3194  * (b) for RoCE link layer it contains a reference to SGID attribute.
3195  * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3196  * attributes which are initialized using ib_init_ah_attr_from_wc().
3197  *
3198  */
3199 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3200 			    const struct ib_wc *wc, const struct ib_grh *grh,
3201 			    struct rdma_ah_attr *ah_attr);
3202 
3203 /**
3204  * ib_create_ah_from_wc - Creates an address handle associated with the
3205  *   sender of the specified work completion.
3206  * @pd: The protection domain associated with the address handle.
3207  * @wc: Work completion information associated with a received message.
3208  * @grh: References the received global route header.  This parameter is
3209  *   ignored unless the work completion indicates that the GRH is valid.
3210  * @port_num: The outbound port number to associate with the address.
3211  *
3212  * The address handle is used to reference a local or global destination
3213  * in all UD QP post sends.
3214  */
3215 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3216 				   const struct ib_grh *grh, u8 port_num);
3217 
3218 /**
3219  * rdma_modify_ah - Modifies the address vector associated with an address
3220  *   handle.
3221  * @ah: The address handle to modify.
3222  * @ah_attr: The new address vector attributes to associate with the
3223  *   address handle.
3224  */
3225 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3226 
3227 /**
3228  * rdma_query_ah - Queries the address vector associated with an address
3229  *   handle.
3230  * @ah: The address handle to query.
3231  * @ah_attr: The address vector attributes associated with the address
3232  *   handle.
3233  */
3234 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3235 
3236 /**
3237  * rdma_destroy_ah - Destroys an address handle.
3238  * @ah: The address handle to destroy.
3239  */
3240 int rdma_destroy_ah(struct ib_ah *ah);
3241 
3242 /**
3243  * ib_create_srq - Creates a SRQ associated with the specified protection
3244  *   domain.
3245  * @pd: The protection domain associated with the SRQ.
3246  * @srq_init_attr: A list of initial attributes required to create the
3247  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
3248  *   the actual capabilities of the created SRQ.
3249  *
3250  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3251  * requested size of the SRQ, and set to the actual values allocated
3252  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
3253  * will always be at least as large as the requested values.
3254  */
3255 struct ib_srq *ib_create_srq(struct ib_pd *pd,
3256 			     struct ib_srq_init_attr *srq_init_attr);
3257 
3258 /**
3259  * ib_modify_srq - Modifies the attributes for the specified SRQ.
3260  * @srq: The SRQ to modify.
3261  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3262  *   the current values of selected SRQ attributes are returned.
3263  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3264  *   are being modified.
3265  *
3266  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3267  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3268  * the number of receives queued drops below the limit.
3269  */
3270 int ib_modify_srq(struct ib_srq *srq,
3271 		  struct ib_srq_attr *srq_attr,
3272 		  enum ib_srq_attr_mask srq_attr_mask);
3273 
3274 /**
3275  * ib_query_srq - Returns the attribute list and current values for the
3276  *   specified SRQ.
3277  * @srq: The SRQ to query.
3278  * @srq_attr: The attributes of the specified SRQ.
3279  */
3280 int ib_query_srq(struct ib_srq *srq,
3281 		 struct ib_srq_attr *srq_attr);
3282 
3283 /**
3284  * ib_destroy_srq - Destroys the specified SRQ.
3285  * @srq: The SRQ to destroy.
3286  */
3287 int ib_destroy_srq(struct ib_srq *srq);
3288 
3289 /**
3290  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3291  * @srq: The SRQ to post the work request on.
3292  * @recv_wr: A list of work requests to post on the receive queue.
3293  * @bad_recv_wr: On an immediate failure, this parameter will reference
3294  *   the work request that failed to be posted on the QP.
3295  */
3296 static inline int ib_post_srq_recv(struct ib_srq *srq,
3297 				   const struct ib_recv_wr *recv_wr,
3298 				   const struct ib_recv_wr **bad_recv_wr)
3299 {
3300 	const struct ib_recv_wr *dummy;
3301 
3302 	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
3303 }
3304 
3305 /**
3306  * ib_create_qp - Creates a QP associated with the specified protection
3307  *   domain.
3308  * @pd: The protection domain associated with the QP.
3309  * @qp_init_attr: A list of initial attributes required to create the
3310  *   QP.  If QP creation succeeds, then the attributes are updated to
3311  *   the actual capabilities of the created QP.
3312  */
3313 struct ib_qp *ib_create_qp(struct ib_pd *pd,
3314 			   struct ib_qp_init_attr *qp_init_attr);
3315 
3316 /**
3317  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3318  * @qp: The QP to modify.
3319  * @attr: On input, specifies the QP attributes to modify.  On output,
3320  *   the current values of selected QP attributes are returned.
3321  * @attr_mask: A bit-mask used to specify which attributes of the QP
3322  *   are being modified.
3323  * @udata: pointer to the user's input/output buffer information.
3325  * It returns 0 on success and returns appropriate error code on error.
3326  */
3327 int ib_modify_qp_with_udata(struct ib_qp *qp,
3328 			    struct ib_qp_attr *attr,
3329 			    int attr_mask,
3330 			    struct ib_udata *udata);
3331 
3332 /**
3333  * ib_modify_qp - Modifies the attributes for the specified QP and then
3334  *   transitions the QP to the given state.
3335  * @qp: The QP to modify.
3336  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3337  *   the current values of selected QP attributes are returned.
3338  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3339  *   are being modified.
3340  */
3341 int ib_modify_qp(struct ib_qp *qp,
3342 		 struct ib_qp_attr *qp_attr,
3343 		 int qp_attr_mask);
3344 
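/*
 * Example (illustrative sketch, not part of this header): driving a freshly
 * created QP to the INIT state.  The mask must name every attribute being
 * set; the access flags chosen here are only an illustration.
 */
static inline int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
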
3345 /**
3346  * ib_query_qp - Returns the attribute list and current values for the
3347  *   specified QP.
3348  * @qp: The QP to query.
3349  * @qp_attr: The attributes of the specified QP.
3350  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3351  * @qp_init_attr: Additional attributes of the selected QP.
3352  *
3353  * The qp_attr_mask may be used to limit the query to gathering only the
3354  * selected attributes.
3355  */
3356 int ib_query_qp(struct ib_qp *qp,
3357 		struct ib_qp_attr *qp_attr,
3358 		int qp_attr_mask,
3359 		struct ib_qp_init_attr *qp_init_attr);
3360 
3361 /**
3362  * ib_destroy_qp - Destroys the specified QP.
3363  * @qp: The QP to destroy.
3364  */
3365 int ib_destroy_qp(struct ib_qp *qp);
3366 
3367 /**
3368  * ib_open_qp - Obtain a reference to an existing sharable QP.
3369  * @xrcd: XRC domain
3370  * @qp_open_attr: Attributes identifying the QP to open.
3371  *
3372  * Returns a reference to a sharable QP.
3373  */
3374 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3375 			 struct ib_qp_open_attr *qp_open_attr);
3376 
3377 /**
3378  * ib_close_qp - Release an external reference to a QP.
3379  * @qp: The QP handle to release
3380  *
3381  * The opened QP handle is released by the caller.  The underlying
3382  * shared QP is not destroyed until all internal references are released.
3383  */
3384 int ib_close_qp(struct ib_qp *qp);
3385 
3386 /**
3387  * ib_post_send - Posts a list of work requests to the send queue of
3388  *   the specified QP.
3389  * @qp: The QP to post the work request on.
3390  * @send_wr: A list of work requests to post on the send queue.
3391  * @bad_send_wr: On an immediate failure, this parameter will reference
3392  *   the work request that failed to be posted on the QP.
3393  *
3394  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3395  * error is returned, the QP state shall not be affected,
3396  * ib_post_send() will return an immediate error after queueing any
3397  * earlier work requests in the list.
3398  */
3399 static inline int ib_post_send(struct ib_qp *qp,
3400 			       const struct ib_send_wr *send_wr,
3401 			       const struct ib_send_wr **bad_send_wr)
3402 {
3403 	const struct ib_send_wr *dummy;
3404 
3405 	return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
3406 }
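
/*
 * Illustrative sketch (editorial, not part of this header's API): posting a
 * signaled SEND of one SGE.  "dma_addr", "len", "ctx" and "pd" are
 * hypothetical caller-owned values.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */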
3407 
3408 /**
3409  * ib_post_recv - Posts a list of work requests to the receive queue of
3410  *   the specified QP.
3411  * @qp: The QP to post the work request on.
3412  * @recv_wr: A list of work requests to post on the receive queue.
3413  * @bad_recv_wr: On an immediate failure, this parameter will reference
3414  *   the work request that failed to be posted on the QP.
3415  */
3416 static inline int ib_post_recv(struct ib_qp *qp,
3417 			       const struct ib_recv_wr *recv_wr,
3418 			       const struct ib_recv_wr **bad_recv_wr)
3419 {
3420 	const struct ib_recv_wr *dummy;
3421 
3422 	return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3423 }
3424 
3425 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3426 			    int nr_cqe, int comp_vector,
3427 			    enum ib_poll_context poll_ctx, const char *caller);
3428 #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3429 	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3430 
3431 void ib_free_cq(struct ib_cq *cq);
3432 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3433 
3434 /**
3435  * ib_create_cq - Creates a CQ on the specified device.
3436  * @device: The device on which to create the CQ.
3437  * @comp_handler: A user-specified callback that is invoked when a
3438  *   completion event occurs on the CQ.
3439  * @event_handler: A user-specified callback that is invoked when an
3440  *   asynchronous event not associated with a completion occurs on the CQ.
3441  * @cq_context: Context associated with the CQ returned to the user via
3442  *   the associated completion and event handlers.
3443  * @cq_attr: The attributes with which the CQ should be created.
3444  *
3445  * Users can examine the cq structure to determine the actual CQ size.
3446  */
3447 struct ib_cq *__ib_create_cq(struct ib_device *device,
3448 			     ib_comp_handler comp_handler,
3449 			     void (*event_handler)(struct ib_event *, void *),
3450 			     void *cq_context,
3451 			     const struct ib_cq_init_attr *cq_attr,
3452 			     const char *caller);
3453 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3454 	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
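
/*
 * Illustrative sketch (editorial, not part of this header's API): creating a
 * CQ with at least 256 entries on completion vector 0.  "my_comp_handler"
 * and "ctx" are hypothetical; the handler has the ib_comp_handler signature.
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe         = 256,
 *		.comp_vector = 0,
 *	};
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					ctx, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */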
3455 
3456 /**
3457  * ib_resize_cq - Modifies the capacity of the CQ.
3458  * @cq: The CQ to resize.
3459  * @cqe: The minimum size of the CQ.
3460  *
3461  * Users can examine the cq structure to determine the actual CQ size.
3462  */
3463 int ib_resize_cq(struct ib_cq *cq, int cqe);
3464 
3465 /**
3466  * rdma_set_cq_moderation - Modifies moderation params of the CQ
3467  * @cq: The CQ to modify.
3468  * @cq_count: number of CQEs that will trigger an event
3469  * @cq_period: max period of time in usec before triggering an event
3470  *
3471  */
3472 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3473 
3474 /**
3475  * ib_destroy_cq - Destroys the specified CQ.
3476  * @cq: The CQ to destroy.
3477  */
3478 int ib_destroy_cq(struct ib_cq *cq);
3479 
3480 /**
3481  * ib_poll_cq - poll a CQ for completion(s)
3482  * @cq: the CQ being polled
3483  * @num_entries: maximum number of completions to return
3484  * @wc: array of at least @num_entries &struct ib_wc where completions
3485  *   will be returned
3486  *
3487  * Poll a CQ for (possibly multiple) completions.  If the return value
3488  * is < 0, an error occurred.  If the return value is >= 0, it is the
3489  * number of completions returned.  If the return value is
3490  * non-negative and < num_entries, then the CQ was emptied.
3491  */
3492 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3493 			     struct ib_wc *wc)
3494 {
3495 	return cq->device->poll_cq(cq, num_entries, wc);
3496 }
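
/*
 * Illustrative sketch (editorial, not part of this header's API): draining a
 * CQ in batches and checking each completion's status.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %d\n",
 *				       wc[i].wr_id, wc[i].status);
 *		}
 *	}
 */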
3497 
3498 /**
3499  * ib_req_notify_cq - Request completion notification on a CQ.
3500  * @cq: The CQ to generate an event for.
3501  * @flags:
3502  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3503  *   to request an event on the next solicited event or next work
3504  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3505  *   may also be |ed in to request a hint about missed events, as
3506  *   described below.
3507  *
3508  * Return Value:
3509  *    < 0 means an error occurred while requesting notification
3510  *   == 0 means notification was requested successfully, and if
3511  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3512  *        were missed and it is safe to wait for another event.  In
3513  *        this case it is guaranteed that any work completions added
3514  *        to the CQ since the last CQ poll will trigger a completion
3515  *        notification event.
3516  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3517  *        in.  It means that the consumer must poll the CQ again to
3518  *        make sure it is empty to avoid missing an event because of a
3519  *        race between requesting notification and an entry being
3520  *        added to the CQ.  This return value means it is possible
3521  *        (but not guaranteed) that a work completion has been added
3522  *        to the CQ since the last poll without triggering a
3523  *        completion notification event.
3524  */
3525 static inline int ib_req_notify_cq(struct ib_cq *cq,
3526 				   enum ib_cq_notify_flags flags)
3527 {
3528 	return cq->device->req_notify_cq(cq, flags);
3529 }
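
/*
 * Illustrative sketch (editorial, not part of this header's API): the
 * poll/arm/re-poll loop implied by the return-value rules above.  A positive
 * return with IB_CQ_REPORT_MISSED_EVENTS set means the CQ must be polled
 * again before it is safe to sleep.  process_one_completion() is a
 * hypothetical consumer routine.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_one_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */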
3530 
3531 /**
3532  * ib_req_ncomp_notif - Request completion notification when there are
3533  *   at least the specified number of unreaped completions on the CQ.
3534  * @cq: The CQ to generate an event for.
3535  * @wc_cnt: The number of unreaped completions that should be on the
3536  *   CQ before an event is generated.
3537  */
3538 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3539 {
3540 	return cq->device->req_ncomp_notif ?
3541 		cq->device->req_ncomp_notif(cq, wc_cnt) :
3542 		-ENOSYS;
3543 }
3544 
3545 /**
3546  * ib_dma_mapping_error - check a DMA addr for error
3547  * @dev: The device for which the dma_addr was created
3548  * @dma_addr: The DMA address to check
3549  */
3550 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3551 {
3552 	return dma_mapping_error(dev->dma_device, dma_addr);
3553 }
3554 
3555 /**
3556  * ib_dma_map_single - Map a kernel virtual address to DMA address
3557  * @dev: The device for which the dma_addr is to be created
3558  * @cpu_addr: The kernel virtual address
3559  * @size: The size of the region in bytes
3560  * @direction: The direction of the DMA
3561  */
3562 static inline u64 ib_dma_map_single(struct ib_device *dev,
3563 				    void *cpu_addr, size_t size,
3564 				    enum dma_data_direction direction)
3565 {
3566 	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3567 }
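
/*
 * Illustrative sketch (editorial, not part of this header's API): mapping a
 * kernel buffer for device reads, checking the mapping, and unmapping when
 * the transfer is done.  "buf" and "len" are hypothetical.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *
 *	(post work requests that reference dma_addr, then unmap)
 *
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */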
3568 
3569 /**
3570  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3571  * @dev: The device for which the DMA address was created
3572  * @addr: The DMA address
3573  * @size: The size of the region in bytes
3574  * @direction: The direction of the DMA
3575  */
3576 static inline void ib_dma_unmap_single(struct ib_device *dev,
3577 				       u64 addr, size_t size,
3578 				       enum dma_data_direction direction)
3579 {
3580 	dma_unmap_single(dev->dma_device, addr, size, direction);
3581 }
3582 
3583 /**
3584  * ib_dma_map_page - Map a physical page to DMA address
3585  * @dev: The device for which the dma_addr is to be created
3586  * @page: The page to be mapped
3587  * @offset: The offset within the page
3588  * @size: The size of the region in bytes
3589  * @direction: The direction of the DMA
3590  */
3591 static inline u64 ib_dma_map_page(struct ib_device *dev,
3592 				  struct page *page,
3593 				  unsigned long offset,
3594 				  size_t size,
3595 				  enum dma_data_direction direction)
3596 {
3597 	return dma_map_page(dev->dma_device, page, offset, size, direction);
3598 }
3599 
3600 /**
3601  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3602  * @dev: The device for which the DMA address was created
3603  * @addr: The DMA address
3604  * @size: The size of the region in bytes
3605  * @direction: The direction of the DMA
3606  */
3607 static inline void ib_dma_unmap_page(struct ib_device *dev,
3608 				     u64 addr, size_t size,
3609 				     enum dma_data_direction direction)
3610 {
3611 	dma_unmap_page(dev->dma_device, addr, size, direction);
3612 }
3613 
3614 /**
3615  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3616  * @dev: The device for which the DMA addresses are to be created
3617  * @sg: The array of scatter/gather entries
3618  * @nents: The number of scatter/gather entries
3619  * @direction: The direction of the DMA
3620  */
3621 static inline int ib_dma_map_sg(struct ib_device *dev,
3622 				struct scatterlist *sg, int nents,
3623 				enum dma_data_direction direction)
3624 {
3625 	return dma_map_sg(dev->dma_device, sg, nents, direction);
3626 }
3627 
3628 /**
3629  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3630  * @dev: The device for which the DMA addresses were created
3631  * @sg: The array of scatter/gather entries
3632  * @nents: The number of scatter/gather entries
3633  * @direction: The direction of the DMA
3634  */
3635 static inline void ib_dma_unmap_sg(struct ib_device *dev,
3636 				   struct scatterlist *sg, int nents,
3637 				   enum dma_data_direction direction)
3638 {
3639 	dma_unmap_sg(dev->dma_device, sg, nents, direction);
3640 }
3641 
3642 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3643 				      struct scatterlist *sg, int nents,
3644 				      enum dma_data_direction direction,
3645 				      unsigned long dma_attrs)
3646 {
3647 	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3648 				dma_attrs);
3649 }
3650 
3651 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3652 					 struct scatterlist *sg, int nents,
3653 					 enum dma_data_direction direction,
3654 					 unsigned long dma_attrs)
3655 {
3656 	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3657 }
3658 /**
3659  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3660  * @dev: The device for which the DMA addresses were created
3661  * @sg: The scatter/gather entry
3662  *
3663  * Note: this function is obsolete. To do: change all occurrences of
3664  * ib_sg_dma_address() into sg_dma_address().
3665  */
3666 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3667 				    struct scatterlist *sg)
3668 {
3669 	return sg_dma_address(sg);
3670 }
3671 
3672 /**
3673  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3674  * @dev: The device for which the DMA addresses were created
3675  * @sg: The scatter/gather entry
3676  *
3677  * Note: this function is obsolete. To do: change all occurrences of
3678  * ib_sg_dma_len() into sg_dma_len().
3679  */
3680 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3681 					 struct scatterlist *sg)
3682 {
3683 	return sg_dma_len(sg);
3684 }
3685 
3686 /**
3687  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3688  * @dev: The device for which the DMA address was created
3689  * @addr: The DMA address
3690  * @size: The size of the region in bytes
3691  * @dir: The direction of the DMA
3692  */
3693 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3694 					      u64 addr,
3695 					      size_t size,
3696 					      enum dma_data_direction dir)
3697 {
3698 	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3699 }
3700 
3701 /**
3702  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3703  * @dev: The device for which the DMA address was created
3704  * @addr: The DMA address
3705  * @size: The size of the region in bytes
3706  * @dir: The direction of the DMA
3707  */
3708 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3709 						 u64 addr,
3710 						 size_t size,
3711 						 enum dma_data_direction dir)
3712 {
3713 	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3714 }
3715 
3716 /**
3717  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3718  * @dev: The device for which the DMA address is requested
3719  * @size: The size of the region to allocate in bytes
3720  * @dma_handle: A pointer for returning the DMA address of the region
3721  * @flag: memory allocator flags
3722  */
3723 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3724 					   size_t size,
3725 					   dma_addr_t *dma_handle,
3726 					   gfp_t flag)
3727 {
3728 	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3729 }
3730 
3731 /**
3732  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3733  * @dev: The device for which the DMA addresses were allocated
3734  * @size: The size of the region
3735  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3736  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3737  */
3738 static inline void ib_dma_free_coherent(struct ib_device *dev,
3739 					size_t size, void *cpu_addr,
3740 					dma_addr_t dma_handle)
3741 {
3742 	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3743 }
3744 
3745 /**
3746  * ib_dereg_mr - Deregisters a memory region and removes it from the
3747  *   HCA translation table.
3748  * @mr: The memory region to deregister.
3749  *
3750  * This function can fail if the memory region has memory windows bound to it.
3751  */
3752 int ib_dereg_mr(struct ib_mr *mr);
3753 
3754 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3755 			  enum ib_mr_type mr_type,
3756 			  u32 max_num_sg);
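
/*
 * Illustrative sketch (editorial, not part of this header's API): allocating
 * a fast-registration MR capable of mapping up to 16 pages, and releasing it
 * again.  The max_num_sg value is arbitrary.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);
 */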
3757 
3758 /**
3759  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3760  *   R_Key and L_Key.
3761  * @mr: struct ib_mr pointer to be updated.
3762  * @newkey: new key to be used.
3763  */
3764 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3765 {
3766 	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3767 	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3768 }
3769 
3770 /**
3771  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3772  * for calculating a new rkey for type 2 memory windows.
3773  * @rkey: the rkey to increment.
3774  */
3775 static inline u32 ib_inc_rkey(u32 rkey)
3776 {
3777 	const u32 mask = 0x000000ff;
3778 	return ((rkey + 1) & mask) | (rkey & ~mask);
3779 }
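
/*
 * Illustrative sketch (editorial, not part of this header's API): giving a
 * fast-registration MR a fresh key before its next registration work
 * request.  Only the low byte of the incremented rkey is applied by
 * ib_update_fast_reg_key().
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */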
3780 
3781 /**
3782  * ib_alloc_fmr - Allocates an unmapped fast memory region.
3783  * @pd: The protection domain associated with the unmapped region.
3784  * @mr_access_flags: Specifies the memory access rights.
3785  * @fmr_attr: Attributes of the unmapped region.
3786  *
3787  * A fast memory region must be mapped before it can be used as part of
3788  * a work request.
3789  */
3790 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3791 			    int mr_access_flags,
3792 			    struct ib_fmr_attr *fmr_attr);
3793 
3794 /**
3795  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3796  * @fmr: The fast memory region to associate with the pages.
3797  * @page_list: An array of physical pages to map to the fast memory region.
3798  * @list_len: The number of pages in page_list.
3799  * @iova: The I/O virtual address to use with the mapped region.
3800  */
3801 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3802 				  u64 *page_list, int list_len,
3803 				  u64 iova)
3804 {
3805 	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3806 }
3807 
3808 /**
3809  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3810  * @fmr_list: A linked list of fast memory regions to unmap.
3811  */
3812 int ib_unmap_fmr(struct list_head *fmr_list);
3813 
3814 /**
3815  * ib_dealloc_fmr - Deallocates a fast memory region.
3816  * @fmr: The fast memory region to deallocate.
3817  */
3818 int ib_dealloc_fmr(struct ib_fmr *fmr);
3819 
3820 /**
3821  * ib_attach_mcast - Attaches the specified QP to a multicast group.
3822  * @qp: QP to attach to the multicast group.  The QP must be type
3823  *   IB_QPT_UD.
3824  * @gid: Multicast group GID.
3825  * @lid: Multicast group LID in host byte order.
3826  *
3827  * In order to send and receive multicast packets, subnet
3828  * administration must have created the multicast group and configured
3829  * the fabric appropriately.  The port associated with the specified
3830  * QP must also be a member of the multicast group.
3831  */
3832 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3833 
3834 /**
3835  * ib_detach_mcast - Detaches the specified QP from a multicast group.
3836  * @qp: QP to detach from the multicast group.
3837  * @gid: Multicast group GID.
3838  * @lid: Multicast group LID in host byte order.
3839  */
3840 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
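
/*
 * Illustrative sketch (editorial, not part of this header's API): joining
 * and later leaving a multicast group on a UD QP.  "mgid" and "mlid" are
 * hypothetical values obtained from the subnet administrator (e.g. via an
 * SA multicast join).
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */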
3841 
3842 /**
3843  * ib_alloc_xrcd - Allocates an XRC domain.
3844  * @device: The device on which to allocate the XRC domain.
3845  * @caller: Module name for kernel consumers
3846  */
3847 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3848 #define ib_alloc_xrcd(device) \
3849 	__ib_alloc_xrcd((device), KBUILD_MODNAME)
3850 
3851 /**
3852  * ib_dealloc_xrcd - Deallocates an XRC domain.
3853  * @xrcd: The XRC domain to deallocate.
3854  */
3855 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3856 
3857 static inline int ib_check_mr_access(int flags)
3858 {
3859 	/*
3860 	 * Local write permission is required if remote write or
3861 	 * remote atomic permission is also requested.
3862 	 */
3863 	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3864 	    !(flags & IB_ACCESS_LOCAL_WRITE))
3865 		return -EINVAL;
3866 
3867 	if (flags & ~IB_ACCESS_SUPPORTED)
3868 		return -EINVAL;
3869 
3870 	return 0;
3871 }
3872 
3873 static inline bool ib_access_writable(int access_flags)
3874 {
3875 	/*
3876 	 * We have writable memory backing the MR if any of the following
3877 	 * access flags are set.  "Local write" and "remote write" obviously
3878 	 * require write access.  "Remote atomic" can do things like fetch and
3879 	 * add, which will modify memory, and "MW bind" can change permissions
3880 	 * by binding a window.
3881 	 */
3882 	return access_flags &
3883 		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
3884 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3885 }
3886 
3887 /**
3888  * ib_check_mr_status - lightweight check of MR status.
3889  *     This routine may provide status checks on a selected
3890  *     ib_mr.  The first use is for signature status checks.
3891  *
3892  * @mr: A memory region.
3893  * @check_mask: Bitmask of which checks to perform from
3894  *     ib_mr_status_check enumeration.
3895  * @mr_status: The container of relevant status checks.
3896  *     Failed checks will be indicated in the status bitmask,
3897  *     and the relevant info will be in the error item.
3898  */
3899 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3900 		       struct ib_mr_status *mr_status);
3901 
3902 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3903 					    u16 pkey, const union ib_gid *gid,
3904 					    const struct sockaddr *addr);
3905 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3906 			   struct ib_wq_init_attr *init_attr);
3907 int ib_destroy_wq(struct ib_wq *wq);
3908 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3909 		 u32 wq_attr_mask);
3910 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3911 						 struct ib_rwq_ind_table_init_attr*
3912 						 wq_ind_table_init_attr);
3913 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3914 
3915 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3916 		 unsigned int *sg_offset, unsigned int page_size);
3917 
3918 static inline int
3919 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3920 		  unsigned int *sg_offset, unsigned int page_size)
3921 {
3922 	int n;
3923 
3924 	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3925 	mr->iova = 0;
3926 
3927 	return n;
3928 }
3929 
3930 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3931 		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3932 
3933 void ib_drain_rq(struct ib_qp *qp);
3934 void ib_drain_sq(struct ib_qp *qp);
3935 void ib_drain_qp(struct ib_qp *qp);
3936 
3937 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3938 
3939 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3940 {
3941 	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3942 		return attr->roce.dmac;
3943 	return NULL;
3944 }
3945 
3946 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3947 {
3948 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3949 		attr->ib.dlid = (u16)dlid;
3950 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3951 		attr->opa.dlid = dlid;
3952 }
3953 
3954 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3955 {
3956 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3957 		return attr->ib.dlid;
3958 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3959 		return attr->opa.dlid;
3960 	return 0;
3961 }
3962 
3963 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3964 {
3965 	attr->sl = sl;
3966 }
3967 
3968 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3969 {
3970 	return attr->sl;
3971 }
3972 
3973 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3974 					 u8 src_path_bits)
3975 {
3976 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3977 		attr->ib.src_path_bits = src_path_bits;
3978 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3979 		attr->opa.src_path_bits = src_path_bits;
3980 }
3981 
3982 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3983 {
3984 	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3985 		return attr->ib.src_path_bits;
3986 	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3987 		return attr->opa.src_path_bits;
3988 	return 0;
3989 }
3990 
3991 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3992 					bool make_grd)
3993 {
3994 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3995 		attr->opa.make_grd = make_grd;
3996 }
3997 
3998 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3999 {
4000 	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4001 		return attr->opa.make_grd;
4002 	return false;
4003 }
4004 
4005 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4006 {
4007 	attr->port_num = port_num;
4008 }
4009 
4010 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4011 {
4012 	return attr->port_num;
4013 }
4014 
4015 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4016 					   u8 static_rate)
4017 {
4018 	attr->static_rate = static_rate;
4019 }
4020 
4021 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4022 {
4023 	return attr->static_rate;
4024 }
4025 
4026 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4027 					enum ib_ah_flags flag)
4028 {
4029 	attr->ah_flags = flag;
4030 }
4031 
4032 static inline enum ib_ah_flags
4033 		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4034 {
4035 	return attr->ah_flags;
4036 }
4037 
4038 static inline const struct ib_global_route
4039 		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4040 {
4041 	return &attr->grh;
4042 }
4043 
4044 /* To retrieve and modify the GRH */
4045 static inline struct ib_global_route
4046 		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4047 {
4048 	return &attr->grh;
4049 }
4050 
4051 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4052 {
4053 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4054 
4055 	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4056 }
4057 
4058 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4059 					     __be64 prefix)
4060 {
4061 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4062 
4063 	grh->dgid.global.subnet_prefix = prefix;
4064 }
4065 
4066 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4067 					    __be64 if_id)
4068 {
4069 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4070 
4071 	grh->dgid.global.interface_id = if_id;
4072 }
4073 
4074 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4075 				   union ib_gid *dgid, u32 flow_label,
4076 				   u8 sgid_index, u8 hop_limit,
4077 				   u8 traffic_class)
4078 {
4079 	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4080 
4081 	attr->ah_flags = IB_AH_GRH;
4082 	if (dgid)
4083 		grh->dgid = *dgid;
4084 	grh->flow_label = flow_label;
4085 	grh->sgid_index = sgid_index;
4086 	grh->hop_limit = hop_limit;
4087 	grh->traffic_class = traffic_class;
4088 	grh->sgid_attr = NULL;
4089 }
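
/*
 * Illustrative sketch (editorial, not part of this header's API): filling an
 * address handle attribute with a GRH.  "device", "port_num", "dlid", "dgid"
 * and "sgid_index" are hypothetical values resolved by the caller (e.g. from
 * a path record); flow label 0, hop limit 64 and traffic class 0 are
 * arbitrary.
 *
 *	struct rdma_ah_attr ah_attr = {};
 *
 *	ah_attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_dlid(&ah_attr, dlid);
 *	rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 */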
4090 
4091 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4092 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4093 			     u32 flow_label, u8 hop_limit, u8 traffic_class,
4094 			     const struct ib_gid_attr *sgid_attr);
4095 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4096 		       const struct rdma_ah_attr *src);
4097 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4098 			  const struct rdma_ah_attr *new);
4099 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4100 
4101 /**
4102  * rdma_ah_find_type - Return address handle type.
4103  *
4104  * @dev: Device to be checked
4105  * @port_num: Port number
4106  */
4107 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4108 						       u8 port_num)
4109 {
4110 	if (rdma_protocol_roce(dev, port_num))
4111 		return RDMA_AH_ATTR_TYPE_ROCE;
4112 	if (rdma_protocol_ib(dev, port_num)) {
4113 		if (rdma_cap_opa_ah(dev, port_num))
4114 			return RDMA_AH_ATTR_TYPE_OPA;
4115 		return RDMA_AH_ATTR_TYPE_IB;
4116 	}
4117 
4118 	return RDMA_AH_ATTR_TYPE_UNDEFINED;
4119 }
4120 
4121 /**
4122  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4123  *     In the current implementation the only way to
4124  *     get the 32bit lid is from other sources for OPA.
4125  *     For IB, lids will always be 16bits so cast the
4126  *     value accordingly.
4127  *
4128  * @lid: A 32bit LID
4129  */
4130 static inline u16 ib_lid_cpu16(u32 lid)
4131 {
4132 	WARN_ON_ONCE(lid & 0xFFFF0000);
4133 	return (u16)lid;
4134 }
4135 
4136 /**
4137  * ib_lid_be16 - Return lid in 16bit BE encoding.
4138  *
4139  * @lid: A 32bit LID
4140  */
4141 static inline __be16 ib_lid_be16(u32 lid)
4142 {
4143 	WARN_ON_ONCE(lid & 0xFFFF0000);
4144 	return cpu_to_be16((u16)lid);
4145 }
4146 
4147 /**
4148  * ib_get_vector_affinity - Get the affinity mappings of a given completion
4149  *   vector
4150  * @device:         the rdma device
4151  * @comp_vector:    index of completion vector
4152  *
4153  * Returns the CPU map of the completion vector, or NULL if the vector
4154  * index is out of range or the device driver does not implement
4155  * get_vector_affinity.
4156  */
4157 static inline const struct cpumask *
4158 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4159 {
4160 	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4161 	    !device->get_vector_affinity)
4162 		return NULL;
4163 
4164 	return device->get_vector_affinity(device, comp_vector);
4166 }
4167 
4168 static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
4169 			       struct ib_qp *qp, struct ib_device *device)
4170 {
4171 	uobj->object = ibflow;
4172 	ibflow->uobject = uobj;
4173 
4174 	if (qp) {
4175 		atomic_inc(&qp->usecnt);
4176 		ibflow->qp = qp;
4177 	}
4178 
4179 	ibflow->device = device;
4180 }
4181 
4182 /**
4183  * rdma_roce_rescan_device - Rescan all of the network devices in the system
4184  * and add their gids, as needed, to the relevant RoCE devices.
4185  *
4186  * @device:         the rdma device
4187  */
4188 void rdma_roce_rescan_device(struct ib_device *ibdev);
4189 
4190 struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
4191 
4192 int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
4193 			       struct uverbs_attr_bundle *attrs);
4194 #endif /* IB_VERBS_H */
4195