#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */

struct trap_list {
	u32 list_len;
	struct list_head list;
};

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per CPU counters to avoid cacheline trading to update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv; /* driver private data */
	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt. However,
	 * rdmavt will also need access to it, so drivers must provide it
	 * during the attach port API call.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;

	/*
	 * Keep a list of traps that have not been repressed. They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};

#define RVT_CQN_MAX 16 /* maximum length of cq name */

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props, for
	 * instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 reserved_operations;
};
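
/*
 * Illustrative sketch only (not part of the rdmavt API): a driver fills in
 * dparms, including dparms.props, before registering. The rvt_example_*
 * name and the numeric values below are hypothetical, not recommendations.
 */
static inline void rvt_example_fill_params(struct rvt_driver_params *dparms)
{
	/* hypothetical sizing choices a driver might make */
	dparms->qp_table_size = 256;
	dparms->lkey_table_size = 16;
	dparms->nports = 1;
	dparms->npkeys = RVT_MAX_PKEY_VALUES;
	dparms->qos_shift = 1;
	/* props is the ib_device_attr that will be reported to ULPs */
	dparms->props.max_mr_size = ~0ULL;
	dparms->props.max_qp_wr = 0x3fff;
}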

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver notice that there is send work to do. It is up to
	 * the driver to actually push the packets out; this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires that the s_lock not be held. The other assumes
	 * the s_lock is held.
	 */
	void (*schedule_send)(struct rvt_qp *qp);
	void (*schedule_send_no_lock)(struct rvt_qp *qp);

	/* Driver specific work request checking */
	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this call back.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/* Passed to ib core registration. Callback to create sysfs files */
	int (*port_callback)(struct ib_device *, u8, struct kobject *);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt. Errors are returned via
	 * ERR_PTR(err). The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver that the particular qp in question has been reset
	 * so that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests
	 * that are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in-progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver that a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);
	/*
	 * Convert an mtu to a path mtu
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shutdown a port
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use of
	 * these is checked for NULL before blindly calling. Rdmavt should also
	 * be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num);

	/* Determine if it is safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification-of */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);

	/* Get and return CPU to pin CQ processing thread */
	int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
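
/*
 * Illustrative sketch only (not part of the rdmavt API): before calling
 * rvt_register_device() a driver points the mandatory calldowns at its own
 * implementations. The rvt_example_* name and the my_* callbacks passed in
 * here are hypothetical driver-side functions.
 */
static inline void
rvt_example_wire_calldowns(struct rvt_driver_provided *drv,
			   void (*my_schedule_send)(struct rvt_qp *qp),
			   void (*my_do_send)(struct rvt_qp *qp),
			   void * (*my_qp_priv_alloc)(struct rvt_dev_info *rdi,
						      struct rvt_qp *qp))
{
	drv->schedule_send = my_schedule_send;	/* hot path send kick */
	drv->do_send = my_do_send;		/* rdmavt-driven send progress */
	drv->qp_priv_alloc = my_qp_priv_alloc;	/* driver private qp data */
}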

struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible for
	 * allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it would
	 * want values reported to a ULP. This will be returned to the caller
	 * in rdmavt's device. The driver should also therefore refrain from
	 * modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock; /* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

};

/**
 * rvt_set_ibdev_name - Craft an IB device name from client info
 * @rdi: pointer to the client rvt_dev_info structure
 * @fmt: format string used to combine @name and @unit
 * @name: client specific name
 * @unit: client specific unit number
 */
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
				      const char *fmt, const char *name,
				      const int unit)
{
	snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
}
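
/*
 * Illustrative sketch only: a driver would typically craft its device name
 * once at probe time. The rvt_example_* name and the "rvt" base name are
 * hypothetical.
 */
static inline void rvt_example_name_device(struct rvt_dev_info *rdi, int unit)
{
	/* produces names such as "rvt_0", "rvt_1", ... */
	rvt_set_ibdev_name(rdi, "%s_%d", "rvt", unit);
}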

/**
 * rvt_get_ibdev_name - return the IB name
 * @rdi: rdmavt device
 *
 * Return the registered name of the device.
 */
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
	return rdi->ibdev.name;
}

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic + 1;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}
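
/*
 * Illustrative sketch only: scan a port's pkey table for a partition key,
 * ignoring the membership bit and relying on rvt_get_pkey() returning 0 for
 * out of range indices. The rvt_example_* name is hypothetical.
 */
static inline int rvt_example_find_pkey(struct rvt_dev_info *rdi,
					int port_index, u16 pkey)
{
	unsigned i;

	for (i = 0; i < rvt_get_npkeys(rdi); i++)
		if ((rvt_get_pkey(rdi, port_index, i) & 0x7fff) ==
		    (pkey & 0x7fff))
			return i;
	return -1;
}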

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rdmavt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
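
/*
 * Illustrative sketch only: callers of rvt_lookup_qpn() must hold the RCU
 * read lock across the lookup and for as long as the returned qp is used.
 * The rvt_example_* name is hypothetical.
 */
static inline bool rvt_example_qpn_in_use(struct rvt_dev_info *rdi,
					  struct rvt_ibport *rvp, u32 qpn)
{
	bool in_use;

	rcu_read_lock();
	in_use = !!rvt_lookup_qpn(rdi, rvp, qpn);
	rcu_read_unlock();
	return in_use;
}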

/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);

#endif /* DEF_RDMA_VT_H */