/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Linux driver
 *
 * Contact Information:
 * Allen Hubbe <Allen.Hubbe@emc.com>
 */

#ifndef _NTB_H_
#define _NTB_H_

#include <linux/completion.h>
#include <linux/device.h>

struct ntb_client;
struct ntb_dev;
struct pci_dev;

/**
 * enum ntb_topo - NTB connection topology
 * @NTB_TOPO_NONE: Topology is unknown or invalid.
 * @NTB_TOPO_PRI: On primary side of local ntb.
 * @NTB_TOPO_SEC: On secondary side of remote ntb.
 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
 * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
 * @NTB_TOPO_CROSSLINK: Connected via two symmetric switches.
 */
enum ntb_topo {
	NTB_TOPO_NONE = -1,
	NTB_TOPO_PRI,
	NTB_TOPO_SEC,
	NTB_TOPO_B2B_USD,
	NTB_TOPO_B2B_DSD,
	NTB_TOPO_SWITCH,
	NTB_TOPO_CROSSLINK,
};

static inline int ntb_topo_is_b2b(enum ntb_topo topo)
{
	switch ((int)topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		return 1;
	}
	return 0;
}

static inline char *ntb_topo_string(enum ntb_topo topo)
{
	switch (topo) {
	case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
	case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
	case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
	case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
	case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
	case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
	case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK";
	}
	return "NTB_TOPO_INVALID";
}

/**
 * enum ntb_speed - NTB link training speed
 * @NTB_SPEED_AUTO: Request the max supported speed.
 * @NTB_SPEED_NONE: Link is not trained to any speed.
 * @NTB_SPEED_GEN1: Link is trained to gen1 speed.
 * @NTB_SPEED_GEN2: Link is trained to gen2 speed.
 * @NTB_SPEED_GEN3: Link is trained to gen3 speed.
 * @NTB_SPEED_GEN4: Link is trained to gen4 speed.
 */
enum ntb_speed {
	NTB_SPEED_AUTO = -1,
	NTB_SPEED_NONE = 0,
	NTB_SPEED_GEN1 = 1,
	NTB_SPEED_GEN2 = 2,
	NTB_SPEED_GEN3 = 3,
	NTB_SPEED_GEN4 = 4
};

/**
 * enum ntb_width - NTB link training width
 * @NTB_WIDTH_AUTO: Request the max supported width.
 * @NTB_WIDTH_NONE: Link is not trained to any width.
 * @NTB_WIDTH_1: Link is trained to 1 lane width.
 * @NTB_WIDTH_2: Link is trained to 2 lane width.
 * @NTB_WIDTH_4: Link is trained to 4 lane width.
 * @NTB_WIDTH_8: Link is trained to 8 lane width.
 * @NTB_WIDTH_12: Link is trained to 12 lane width.
 * @NTB_WIDTH_16: Link is trained to 16 lane width.
 * @NTB_WIDTH_32: Link is trained to 32 lane width.
 */
enum ntb_width {
	NTB_WIDTH_AUTO = -1,
	NTB_WIDTH_NONE = 0,
	NTB_WIDTH_1 = 1,
	NTB_WIDTH_2 = 2,
	NTB_WIDTH_4 = 4,
	NTB_WIDTH_8 = 8,
	NTB_WIDTH_12 = 12,
	NTB_WIDTH_16 = 16,
	NTB_WIDTH_32 = 32,
};

/**
 * enum ntb_default_port - NTB default port number
 * @NTB_PORT_PRI_USD: Default port of the NTB_TOPO_PRI/NTB_TOPO_B2B_USD
 *                    topologies
 * @NTB_PORT_SEC_DSD: Default port of the NTB_TOPO_SEC/NTB_TOPO_B2B_DSD
 *                    topologies
 */
enum ntb_default_port {
	NTB_PORT_PRI_USD,
	NTB_PORT_SEC_DSD
};
#define NTB_DEF_PEER_CNT	(1)
#define NTB_DEF_PEER_IDX	(0)

/**
 * struct ntb_client_ops - ntb client operations
 * @probe: Notify client of a new device.
 * @remove: Notify client to remove a device.
 */
struct ntb_client_ops {
	int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
	void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
};

static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
{
	/* commented callbacks are not required: */
	return
		ops->probe &&
		ops->remove &&
		1;
}

/**
 * struct ntb_ctx_ops - ntb driver context operations
 * @link_event: See ntb_link_event().
 * @db_event: See ntb_db_event().
 * @msg_event: See ntb_msg_event().
 */
struct ntb_ctx_ops {
	void (*link_event)(void *ctx);
	void (*db_event)(void *ctx, int db_vector);
	void (*msg_event)(void *ctx);
};

static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
{
	/* commented callbacks are not required: */
	return
		/* ops->link_event && */
		/* ops->db_event && */
		/* ops->msg_event && */
		1;
}

/**
 * struct ntb_dev_ops - ntb device operations
 * @port_number: See ntb_port_number().
 * @peer_port_count: See ntb_peer_port_count().
 * @peer_port_number: See ntb_peer_port_number().
 * @peer_port_idx: See ntb_peer_port_idx().
 * @link_is_up: See ntb_link_is_up().
 * @link_enable: See ntb_link_enable().
 * @link_disable: See ntb_link_disable().
 * @mw_count: See ntb_mw_count().
 * @mw_get_align: See ntb_mw_get_align().
 * @mw_set_trans: See ntb_mw_set_trans().
 * @mw_clear_trans: See ntb_mw_clear_trans().
 * @peer_mw_count: See ntb_peer_mw_count().
 * @peer_mw_get_addr: See ntb_peer_mw_get_addr().
 * @peer_mw_set_trans: See ntb_peer_mw_set_trans().
 * @peer_mw_clear_trans: See ntb_peer_mw_clear_trans().
 * @db_is_unsafe: See ntb_db_is_unsafe().
 * @db_valid_mask: See ntb_db_valid_mask().
 * @db_vector_count: See ntb_db_vector_count().
 * @db_vector_mask: See ntb_db_vector_mask().
 * @db_read: See ntb_db_read().
 * @db_set: See ntb_db_set().
 * @db_clear: See ntb_db_clear().
 * @db_read_mask: See ntb_db_read_mask().
 * @db_set_mask: See ntb_db_set_mask().
 * @db_clear_mask: See ntb_db_clear_mask().
 * @peer_db_addr: See ntb_peer_db_addr().
 * @peer_db_read: See ntb_peer_db_read().
 * @peer_db_set: See ntb_peer_db_set().
 * @peer_db_clear: See ntb_peer_db_clear().
 * @peer_db_read_mask: See ntb_peer_db_read_mask().
 * @peer_db_set_mask: See ntb_peer_db_set_mask().
 * @peer_db_clear_mask: See ntb_peer_db_clear_mask().
 * @spad_is_unsafe: See ntb_spad_is_unsafe().
 * @spad_count: See ntb_spad_count().
 * @spad_read: See ntb_spad_read().
 * @spad_write: See ntb_spad_write().
 * @peer_spad_addr: See ntb_peer_spad_addr().
 * @peer_spad_read: See ntb_peer_spad_read().
 * @peer_spad_write: See ntb_peer_spad_write().
 * @msg_count: See ntb_msg_count().
 * @msg_inbits: See ntb_msg_inbits().
 * @msg_outbits: See ntb_msg_outbits().
 * @msg_read_sts: See ntb_msg_read_sts().
 * @msg_clear_sts: See ntb_msg_clear_sts().
 * @msg_set_mask: See ntb_msg_set_mask().
 * @msg_clear_mask: See ntb_msg_clear_mask().
 * @msg_read: See ntb_msg_read().
 * @peer_msg_write: See ntb_peer_msg_write().
 */
struct ntb_dev_ops {
	int (*port_number)(struct ntb_dev *ntb);
	int (*peer_port_count)(struct ntb_dev *ntb);
	int (*peer_port_number)(struct ntb_dev *ntb, int pidx);
	int (*peer_port_idx)(struct ntb_dev *ntb, int port);

	u64 (*link_is_up)(struct ntb_dev *ntb,
			  enum ntb_speed *speed, enum ntb_width *width);
	int (*link_enable)(struct ntb_dev *ntb,
			   enum ntb_speed max_speed, enum ntb_width max_width);
	int (*link_disable)(struct ntb_dev *ntb);

	int (*mw_count)(struct ntb_dev *ntb, int pidx);
	int (*mw_get_align)(struct ntb_dev *ntb, int pidx, int widx,
			    resource_size_t *addr_align,
			    resource_size_t *size_align,
			    resource_size_t *size_max);
	int (*mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
			    dma_addr_t addr, resource_size_t size);
	int (*mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);
	int (*peer_mw_count)(struct ntb_dev *ntb);
	int (*peer_mw_get_addr)(struct ntb_dev *ntb, int widx,
				phys_addr_t *base, resource_size_t *size);
	int (*peer_mw_set_trans)(struct ntb_dev *ntb, int pidx, int widx,
				 u64 addr, resource_size_t size);
	int (*peer_mw_clear_trans)(struct ntb_dev *ntb, int pidx, int widx);

	int (*db_is_unsafe)(struct ntb_dev *ntb);
	u64 (*db_valid_mask)(struct ntb_dev *ntb);
	int (*db_vector_count)(struct ntb_dev *ntb);
	u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);

	u64 (*db_read)(struct ntb_dev *ntb);
	int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
	int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);

	u64 (*db_read_mask)(struct ntb_dev *ntb);
	int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
	int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);

	int (*peer_db_addr)(struct ntb_dev *ntb,
			    phys_addr_t *db_addr, resource_size_t *db_size);
	u64 (*peer_db_read)(struct ntb_dev *ntb);
	int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
	int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);

	u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
	int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
	int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);

	int (*spad_is_unsafe)(struct ntb_dev *ntb);
	int (*spad_count)(struct ntb_dev *ntb);

	u32 (*spad_read)(struct ntb_dev *ntb, int sidx);
	int (*spad_write)(struct ntb_dev *ntb, int sidx, u32 val);

	int (*peer_spad_addr)(struct ntb_dev *ntb, int pidx, int sidx,
			      phys_addr_t *spad_addr);
	u32 (*peer_spad_read)(struct ntb_dev *ntb, int pidx, int sidx);
	int (*peer_spad_write)(struct ntb_dev *ntb, int pidx, int sidx,
			       u32 val);

	int (*msg_count)(struct ntb_dev *ntb);
	u64 (*msg_inbits)(struct ntb_dev *ntb);
	u64 (*msg_outbits)(struct ntb_dev *ntb);
	u64 (*msg_read_sts)(struct ntb_dev *ntb);
	int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
	int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
	int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
	u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
	int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
};

static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
{
	/* commented callbacks are not required: */
	return
		/* Port operations are required for multiport devices */
		!ops->peer_port_count == !ops->port_number &&
		!ops->peer_port_number == !ops->port_number &&
		!ops->peer_port_idx == !ops->port_number &&

		/* Link operations are required */
		ops->link_is_up &&
		ops->link_enable &&
		ops->link_disable &&

		/* One or both MW interfaces should be developed */
		ops->mw_count &&
		ops->mw_get_align &&
		(ops->mw_set_trans ||
		 ops->peer_mw_set_trans) &&
		/* ops->mw_clear_trans && */
		ops->peer_mw_count &&
		ops->peer_mw_get_addr &&
		/* ops->peer_mw_clear_trans && */

		/* Doorbell operations are mostly required */
		/* ops->db_is_unsafe && */
		ops->db_valid_mask &&
		/* both set, or both unset */
		(!ops->db_vector_count == !ops->db_vector_mask) &&
		ops->db_read &&
		/* ops->db_set && */
		ops->db_clear &&
		/* ops->db_read_mask && */
		ops->db_set_mask &&
		ops->db_clear_mask &&
		/* ops->peer_db_addr && */
		/* ops->peer_db_read && */
		ops->peer_db_set &&
		/* ops->peer_db_clear && */
		/* ops->peer_db_read_mask && */
		/* ops->peer_db_set_mask && */
		/* ops->peer_db_clear_mask && */

		/* Scratchpads interface is optional */
		/* !ops->spad_is_unsafe == !ops->spad_count && */
		!ops->spad_read == !ops->spad_count &&
		!ops->spad_write == !ops->spad_count &&
		/* !ops->peer_spad_addr == !ops->spad_count && */
		/* !ops->peer_spad_read == !ops->spad_count && */
		!ops->peer_spad_write == !ops->spad_count &&

		/* Messaging interface is optional */
		!ops->msg_inbits == !ops->msg_count &&
		!ops->msg_outbits == !ops->msg_count &&
		!ops->msg_read_sts == !ops->msg_count &&
		!ops->msg_clear_sts == !ops->msg_count &&
		/* !ops->msg_set_mask == !ops->msg_count && */
		/* !ops->msg_clear_mask == !ops->msg_count && */
		!ops->msg_read == !ops->msg_count &&
		!ops->peer_msg_write == !ops->msg_count &&
		1;
}

/**
 * struct ntb_client - client interested in ntb devices
 * @drv: Linux driver object.
 * @ops: See &ntb_client_ops.
 */
struct ntb_client {
	struct device_driver drv;
	const struct ntb_client_ops ops;
};
#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)

/**
 * struct ntb_dev - ntb device
 * @dev: Linux device object.
 * @pdev: PCI device entry of the ntb.
 * @topo: Detected topology of the ntb.
 * @ops: See &ntb_dev_ops.
 * @ctx: Driver context associated via ntb_set_ctx().
 * @ctx_ops: See &ntb_ctx_ops.
 */
struct ntb_dev {
	struct device dev;
	struct pci_dev *pdev;
	enum ntb_topo topo;
	const struct ntb_dev_ops *ops;
	void *ctx;
	const struct ntb_ctx_ops *ctx_ops;

	/* private: */

	/* synchronize setting, clearing, and calling ctx_ops */
	spinlock_t ctx_lock;
	/* block unregister until device is fully released */
	struct completion released;
};
#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)

/**
 * ntb_register_client() - register a client for interest in ntb devices
 * @client: Client context.
 *
 * The client will be added to the list of clients interested in ntb devices.
 * The client will be notified of any ntb devices that are not already
 * associated with a client, or if ntb devices are registered later.
 *
 * Return: Zero if the client is registered, otherwise an error number.
 */
#define ntb_register_client(client) \
	__ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)

int __ntb_register_client(struct ntb_client *client, struct module *mod,
			  const char *mod_name);

/**
 * ntb_unregister_client() - unregister a client for interest in ntb devices
 * @client: Client context.
 *
 * The client will be removed from the list of clients interested in ntb
 * devices. If any ntb devices are associated with the client, the client will
 * be notified to remove those devices.
 */
void ntb_unregister_client(struct ntb_client *client);

#define module_ntb_client(__ntb_client) \
	module_driver(__ntb_client, ntb_register_client, \
		      ntb_unregister_client)
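
/*
 * Usage sketch (not part of the API): a minimal client that accepts every
 * NTB device offered to it. The "my_*" names and callback bodies below are
 * hypothetical and only illustrate how &ntb_client_ops, ntb_register_client()
 * and module_ntb_client() fit together.
 *
 *	static int my_probe(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		dev_info(&ntb->dev, "ntb device accepted\n");
 *		return 0;
 *	}
 *
 *	static void my_remove(struct ntb_client *client, struct ntb_dev *ntb)
 *	{
 *		dev_info(&ntb->dev, "ntb device removed\n");
 *	}
 *
 *	static struct ntb_client my_client = {
 *		.ops = {
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *	};
 *	module_ntb_client(my_client);
 */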

/**
 * ntb_register_device() - register a ntb device
 * @ntb: NTB device context.
 *
 * The device will be added to the list of ntb devices. If any clients are
 * interested in ntb devices, each client will be notified of the ntb device,
 * until at most one client accepts the device.
 *
 * Return: Zero if the device is registered, otherwise an error number.
 */
int ntb_register_device(struct ntb_dev *ntb);

/**
 * ntb_unregister_device() - unregister a ntb device
 * @ntb: NTB device context.
 *
 * The device will be removed from the list of ntb devices. If the ntb device
 * is associated with a client, the client will be notified to remove the
 * device.
 */
void ntb_unregister_device(struct ntb_dev *ntb);

/**
 * ntb_set_ctx() - associate a driver context with an ntb device
 * @ntb: NTB device context.
 * @ctx: Driver context.
 * @ctx_ops: Driver context operations.
 *
 * Associate a driver context and operations with a ntb device. The context is
 * provided by the client driver, and the driver may associate a different
 * context with each ntb device.
 *
 * Return: Zero if the context is associated, otherwise an error number.
 */
int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
		const struct ntb_ctx_ops *ctx_ops);

/**
 * ntb_clear_ctx() - disassociate any driver context from an ntb device
 * @ntb: NTB device context.
 *
 * Clear any association that may exist between a driver context and the ntb
 * device.
 */
void ntb_clear_ctx(struct ntb_dev *ntb);

/**
 * ntb_link_event() - notify driver context of a change in link status
 * @ntb: NTB device context.
 *
 * Notify the driver context that the link status may have changed. The driver
 * should call ntb_link_is_up() to get the current status.
 */
void ntb_link_event(struct ntb_dev *ntb);

/**
 * ntb_db_event() - notify driver context of a doorbell event
 * @ntb: NTB device context.
 * @vector: Interrupt vector number.
 *
 * Notify the driver context of a doorbell event. If hardware supports
 * multiple interrupt vectors for doorbells, the vector number indicates which
 * vector received the interrupt. The vector number is relative to the first
 * vector used for doorbells, starting at zero, and must be less than
 * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
 * those bits are associated with the vector number.
 */
void ntb_db_event(struct ntb_dev *ntb, int vector);

/**
 * ntb_msg_event() - notify driver context of a message event
 * @ntb: NTB device context.
 *
 * Notify the driver context of a message event. If hardware supports
 * message registers, this event indicates that a new message arrived in
 * some incoming message register, or that the last sent message couldn't be
 * delivered. The events can be masked/unmasked by the methods
 * ntb_msg_set_mask() and ntb_msg_clear_mask().
 */
void ntb_msg_event(struct ntb_dev *ntb);
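
/*
 * Usage sketch (hypothetical names): from the client probe callback, a
 * driver context and its event handlers are bound to the device before the
 * link is brought up, and unbound again on remove.
 *
 *	static void my_link_event(void *ctx)
 *	{
 *		struct my_ctx *mc = ctx;
 *
 *		schedule_work(&mc->link_work);
 *	}
 *
 *	static void my_db_event(void *ctx, int vector)
 *	{
 *		struct my_ctx *mc = ctx;
 *
 *		tasklet_schedule(&mc->db_tasklet);
 *	}
 *
 *	static const struct ntb_ctx_ops my_ctx_ops = {
 *		.link_event = my_link_event,
 *		.db_event = my_db_event,
 *	};
 *
 *	rc = ntb_set_ctx(ntb, mc, &my_ctx_ops);	// in probe
 *	...
 *	ntb_clear_ctx(ntb);			// in remove
 */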

/**
 * ntb_default_port_number() - get the default local port number
 * @ntb: NTB device context.
 *
 * If the hardware driver doesn't provide the port_number() callback method,
 * the NTB device is considered to have just two ports. This method then
 * returns the default local port number in compliance with the topology.
 *
 * NOTE Don't call this method directly. The ntb_port_number() function should
 * be used instead.
 *
 * Return: the default local port number
 */
int ntb_default_port_number(struct ntb_dev *ntb);

/**
 * ntb_default_peer_port_count() - get the default number of peer device ports
 * @ntb: NTB device context.
 *
 * By default the hardware driver supports just one peer device.
 *
 * NOTE Don't call this method directly. The ntb_peer_port_count() function
 * should be used instead.
 *
 * Return: the default number of peer ports
 */
int ntb_default_peer_port_count(struct ntb_dev *ntb);

/**
 * ntb_default_peer_port_number() - get the default peer port by given index
 * @ntb: NTB device context.
 * @pidx: Peer port index (should not differ from zero).
 *
 * By default the hardware driver supports just one peer device, so this
 * method shall return the corresponding value from enum ntb_default_port.
 *
 * NOTE Don't call this method directly. The ntb_peer_port_number() function
 * should be used instead.
 *
 * Return: the peer device port or negative value indicating an error
 */
int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx);

/**
 * ntb_default_peer_port_idx() - get the default peer device port index by
 *                               given port number
 * @ntb: NTB device context.
 * @port: Peer port number (should be one of enum ntb_default_port).
 *
 * By default the hardware driver supports just one peer device, so as long as
 * the specified port argument is a peer port from enum ntb_default_port, the
 * return value shall be zero.
 *
 * NOTE Don't call this method directly. The ntb_peer_port_idx() function
 * should be used instead.
 *
 * Return: the peer port index or negative value indicating an error
 */
int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port);

/**
 * ntb_port_number() - get the local port number
 * @ntb: NTB device context.
 *
 * Hardware must support at least a simple two-port ntb connection.
 *
 * Return: the local port number
 */
static inline int ntb_port_number(struct ntb_dev *ntb)
{
	if (!ntb->ops->port_number)
		return ntb_default_port_number(ntb);

	return ntb->ops->port_number(ntb);
}

/**
 * ntb_peer_port_count() - get the number of peer device ports
 * @ntb: NTB device context.
 *
 * Hardware may support access to the memory of several remote domains over
 * multi-port NTB devices. This method returns the number of peers the local
 * device can share memory with.
 *
 * Return: the number of peer ports
 */
static inline int ntb_peer_port_count(struct ntb_dev *ntb)
{
	if (!ntb->ops->peer_port_count)
		return ntb_default_peer_port_count(ntb);

	return ntb->ops->peer_port_count(ntb);
}

/**
 * ntb_peer_port_number() - get the peer port by given index
 * @ntb: NTB device context.
 * @pidx: Peer port index.
 *
 * Peer ports are enumerated contiguously by the NTB API logic, so this method
 * retrieves the real port number for a given port index.
 *
 * Return: the peer device port or negative value indicating an error
 */
static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
{
	if (!ntb->ops->peer_port_number)
		return ntb_default_peer_port_number(ntb, pidx);

	return ntb->ops->peer_port_number(ntb, pidx);
}

/**
 * ntb_peer_port_idx() - get the peer device port index by given port number
 * @ntb: NTB device context.
 * @port: Peer port number.
 *
 * Inverse operation of ntb_peer_port_number(), so one can get the port index
 * by a specified port number.
 *
 * Return: the peer port index or negative value indicating an error
 */
static inline int ntb_peer_port_idx(struct ntb_dev *ntb, int port)
{
	if (!ntb->ops->peer_port_idx)
		return ntb_default_peer_port_idx(ntb, port);

	return ntb->ops->peer_port_idx(ntb, port);
}
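
/*
 * Usage sketch: peer ports are addressed by index in most of the API below,
 * so a multi-port aware client typically walks them like this (the loop is
 * illustrative only).
 *
 *	int pidx, port;
 *
 *	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) {
 *		port = ntb_peer_port_number(ntb, pidx);
 *		if (port < 0)
 *			continue;
 *		dev_dbg(&ntb->dev, "peer index %d is port %d\n", pidx, port);
 *	}
 */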

/**
 * ntb_link_is_up() - get the current ntb link state
 * @ntb: NTB device context.
 * @speed: OUT - The link speed expressed as PCIe generation number.
 * @width: OUT - The link width expressed as the number of PCIe lanes.
 *
 * Get the current state of the ntb link. It is recommended to query the link
 * state once after every link event. It is safe to query the link state in
 * the context of the link event callback.
 *
 * Return: bitfield of the indexed ports link state: a bit is set if the link
 * to the port with that index is up, and cleared if it is down.
 */
static inline u64 ntb_link_is_up(struct ntb_dev *ntb,
				 enum ntb_speed *speed, enum ntb_width *width)
{
	return ntb->ops->link_is_up(ntb, speed, width);
}

/**
 * ntb_link_enable() - enable the local port ntb connection
 * @ntb: NTB device context.
 * @max_speed: The maximum link speed expressed as PCIe generation number.
 * @max_width: The maximum link width expressed as the number of PCIe lanes.
 *
 * Enable the NTB/PCIe link on the local or remote (for bridge-to-bridge
 * topology) side of the bridge. If it is supported, the ntb device should
 * train the link to its maximum speed and width, or the requested speed and
 * width, whichever is smaller. Some hardware doesn't support PCIe link
 * training, in which case the last two arguments are ignored.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed,
				  enum ntb_width max_width)
{
	return ntb->ops->link_enable(ntb, max_speed, max_width);
}

/**
 * ntb_link_disable() - disable the local port ntb connection
 * @ntb: NTB device context.
 *
 * Disable the link on the local or remote (for b2b topology) side of the ntb.
 * The ntb device should disable the link. Returning from this call must
 * indicate that a barrier has passed, and that no more writes may pass in
 * either direction across the link, except if this call returns an error
 * number.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_link_disable(struct ntb_dev *ntb)
{
	return ntb->ops->link_disable(ntb);
}
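
/*
 * Usage sketch: a typical bring-up sequence. The client sets its context,
 * requests the link, and then waits for the link_event callback before it
 * inspects the link state. Names and error handling are illustrative.
 *
 *	rc = ntb_set_ctx(ntb, mc, &my_ctx_ops);
 *	if (rc)
 *		return rc;
 *
 *	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *	ntb_link_event(ntb);
 *
 *	// later, in the link_event handler:
 *	enum ntb_speed speed;
 *	enum ntb_width width;
 *	u64 link = ntb_link_is_up(ntb, &speed, &width);
 *
 *	if (link & BIT_ULL(NTB_DEF_PEER_IDX))
 *		dev_dbg(&ntb->dev, "link up: gen%d x%d\n", speed, width);
 */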

/**
 * ntb_mw_count() - get the number of inbound memory windows, which could
 *                  be created for a specified peer device
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 *
 * Hardware and topology may support a different number of memory windows.
 * Moreover, different peer devices can support a different number of memory
 * windows. Simply speaking, this method returns the number of possible
 * inbound memory windows to share with the specified peer device. Note: this
 * may return zero if the link is not up yet.
 *
 * Return: the number of memory windows.
 */
static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	return ntb->ops->mw_count(ntb, pidx);
}

/**
 * ntb_mw_get_align() - get the restriction parameters of inbound memory window
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @widx: Memory window index.
 * @addr_align: OUT - the base alignment for translating the memory window
 * @size_align: OUT - the size alignment for translating the memory window
 * @size_max: OUT - the maximum size of the memory window
 *
 * Get the alignments of an inbound memory window with specified index.
 * NULL may be given for any output parameter if the value is not needed.
 * The alignment and size parameters may be used for allocation of proper
 * shared memory. Note: this must only be called when the link is up.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
				   resource_size_t *addr_align,
				   resource_size_t *size_align,
				   resource_size_t *size_max)
{
	if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
		return -ENOTCONN;

	return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
				      size_max);
}

/**
 * ntb_mw_set_trans() - set the translation of an inbound memory window
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @widx: Memory window index.
 * @addr: The dma address of local memory to expose to the peer.
 * @size: The size of the local memory to expose to the peer.
 *
 * Set the translation of a memory window. The peer may access local memory
 * through the window starting at the address, up to the size. The address
 * and size must be aligned in compliance with restrictions of
 * ntb_mw_get_align(). The region size should not exceed the size_max parameter
 * of that method.
 *
 * This method may not be implemented due to the hardware specific memory
 * windows interface.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				   dma_addr_t addr, resource_size_t size)
{
	if (!ntb->ops->mw_set_trans)
		return 0;

	return ntb->ops->mw_set_trans(ntb, pidx, widx, addr, size);
}
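
/*
 * Usage sketch: exposing a local DMA buffer to the peer through an inbound
 * memory window. The rounding and error paths are illustrative; a real
 * client must also honour size_max and free the buffer on teardown.
 *
 *	resource_size_t addr_align, size_align, size_max;
 *	dma_addr_t dma_addr;
 *	void *buf;
 *	size_t size = SZ_1M;
 *	int rc;
 *
 *	rc = ntb_mw_get_align(ntb, pidx, widx, &addr_align, &size_align,
 *			      &size_max);
 *	if (rc)
 *		return rc;
 *
 *	size = round_up(size, size_align);
 *	buf = dma_alloc_coherent(&ntb->pdev->dev, size, &dma_addr, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	if (!IS_ALIGNED(dma_addr, addr_align)) {
 *		dma_free_coherent(&ntb->pdev->dev, size, buf, dma_addr);
 *		return -EINVAL;
 *	}
 *
 *	rc = ntb_mw_set_trans(ntb, pidx, widx, dma_addr, size);
 */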

/**
 * ntb_mw_clear_trans() - clear the translation address of an inbound memory
 *                        window
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @widx: Memory window index.
 *
 * Clear the translation of an inbound memory window. The peer may no longer
 * access local memory through the window.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int pidx, int widx)
{
	if (!ntb->ops->mw_clear_trans)
		return ntb_mw_set_trans(ntb, pidx, widx, 0, 0);

	return ntb->ops->mw_clear_trans(ntb, pidx, widx);
}

/**
 * ntb_peer_mw_count() - get the number of outbound memory windows, which could
 *                       be mapped to access a shared memory
 * @ntb: NTB device context.
 *
 * Hardware and topology may support a different number of memory windows.
 * This method returns the number of outbound memory windows supported by the
 * local device.
 *
 * Return: the number of memory windows.
 */
static inline int ntb_peer_mw_count(struct ntb_dev *ntb)
{
	return ntb->ops->peer_mw_count(ntb);
}

/**
 * ntb_peer_mw_get_addr() - get map address of an outbound memory window
 * @ntb: NTB device context.
 * @widx: Memory window index (within ntb_peer_mw_count() return value).
 * @base: OUT - the base address of mapping region.
 * @size: OUT - the size of mapping region.
 *
 * Get base and size of memory region to map. NULL may be given for any output
 * parameter if the value is not needed. The base and size may be used for
 * mapping the memory window, to access the peer memory.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
				       phys_addr_t *base,
				       resource_size_t *size)
{
	return ntb->ops->peer_mw_get_addr(ntb, widx, base, size);
}
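
/*
 * Usage sketch: mapping an outbound memory window so the local CPU can write
 * into the peer's shared buffer. Whether ioremap_wc() or plain ioremap() is
 * appropriate depends on the hardware; this is only an illustration.
 *
 *	phys_addr_t mw_base;
 *	resource_size_t mw_size;
 *	void __iomem *mw_vbase;
 *	int rc;
 *
 *	rc = ntb_peer_mw_get_addr(ntb, widx, &mw_base, &mw_size);
 *	if (rc)
 *		return rc;
 *
 *	mw_vbase = ioremap_wc(mw_base, mw_size);
 *	if (!mw_vbase)
 *		return -EIO;
 *
 *	memcpy_toio(mw_vbase, data, len);	// data/len are illustrative
 *	iounmap(mw_vbase);
 */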

/**
 * ntb_peer_mw_set_trans() - set a translation address of a memory window
 *                           retrieved from a peer device
 * @ntb: NTB device context.
 * @pidx: Port index of peer device the translation address received from.
 * @widx: Memory window index.
 * @addr: The dma address of the shared memory to access.
 * @size: The size of the shared memory to access.
 *
 * Set the translation of an outbound memory window. The local device may then
 * access shared memory allocated by the peer device that sent the address.
 *
 * This method may not be implemented due to the hardware specific memory
 * windows interface, so a translation address can only be set on the side
 * where the shared memory (inbound memory windows) is allocated.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
					u64 addr, resource_size_t size)
{
	if (!ntb->ops->peer_mw_set_trans)
		return 0;

	return ntb->ops->peer_mw_set_trans(ntb, pidx, widx, addr, size);
}

/**
 * ntb_peer_mw_clear_trans() - clear the translation address of an outbound
 *                             memory window
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @widx: Memory window index.
 *
 * Clear the translation of an outbound memory window. The local device may no
 * longer access a shared memory through the window.
 *
 * This method may not be implemented due to the hardware specific memory
 * windows interface.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
					  int widx)
{
	if (!ntb->ops->peer_mw_clear_trans)
		return ntb_peer_mw_set_trans(ntb, pidx, widx, 0, 0);

	return ntb->ops->peer_mw_clear_trans(ntb, pidx, widx);
}

/**
 * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
 * @ntb: NTB device context.
 *
 * It is possible for some ntb hardware to be affected by errata. Hardware
 * drivers can advise clients to avoid using doorbells. Clients may ignore
 * this advice, though caution is recommended.
 *
 * Return: Zero if it is safe to use doorbells, or one if it is not safe.
 */
static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	if (!ntb->ops->db_is_unsafe)
		return 0;

	return ntb->ops->db_is_unsafe(ntb);
}

/**
 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
 * @ntb: NTB device context.
 *
 * Hardware may support a different number or arrangement of doorbell bits.
 *
 * Return: A mask of doorbell bits supported by the ntb.
 */
static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb->ops->db_valid_mask(ntb);
}

/**
 * ntb_db_vector_count() - get the number of doorbell interrupt vectors
 * @ntb: NTB device context.
 *
 * Hardware may support a different number of interrupt vectors.
 *
 * Return: The number of doorbell interrupt vectors.
 */
static inline int ntb_db_vector_count(struct ntb_dev *ntb)
{
	if (!ntb->ops->db_vector_count)
		return 1;

	return ntb->ops->db_vector_count(ntb);
}

/**
 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
 * @ntb: NTB device context.
 * @vector: Doorbell vector number.
 *
 * Each interrupt vector may have a different number or arrangement of bits.
 *
 * Return: A mask of doorbell bits serviced by a vector.
 */
static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
{
	if (!ntb->ops->db_vector_mask)
		return ntb_db_valid_mask(ntb);

	return ntb->ops->db_vector_mask(ntb, vector);
}

/**
 * ntb_db_read() - read the local doorbell register
 * @ntb: NTB device context.
 *
 * Read the local doorbell register, and return the bits that are set.
 *
 * Return: The bits currently set in the local doorbell register.
 */
static inline u64 ntb_db_read(struct ntb_dev *ntb)
{
	return ntb->ops->db_read(ntb);
}

/**
 * ntb_db_set() - set bits in the local doorbell register
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to set.
 *
 * Set bits in the local doorbell register, which may generate a local doorbell
 * interrupt. Bits that were already set must remain set.
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	if (!ntb->ops->db_set)
		return -EINVAL;

	return ntb->ops->db_set(ntb, db_bits);
}

/**
 * ntb_db_clear() - clear bits in the local doorbell register
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to clear.
 *
 * Clear bits in the local doorbell register, arming the bits for the next
 * doorbell.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	return ntb->ops->db_clear(ntb, db_bits);
}

/**
 * ntb_db_read_mask() - read the local doorbell mask
 * @ntb: NTB device context.
 *
 * Read the local doorbell mask register, and return the bits that are set.
 *
 * This is unusual, though hardware is likely to support it.
 *
 * Return: The bits currently set in the local doorbell mask register.
 */
static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
{
	if (!ntb->ops->db_read_mask)
		return 0;

	return ntb->ops->db_read_mask(ntb);
}

/**
 * ntb_db_set_mask() - set bits in the local doorbell mask
 * @ntb: NTB device context.
 * @db_bits: Doorbell mask bits to set.
 *
 * Set bits in the local doorbell mask register, preventing doorbell interrupts
 * from being generated for those doorbell bits. Bits that were already set
 * must remain set.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return ntb->ops->db_set_mask(ntb, db_bits);
}

/**
 * ntb_db_clear_mask() - clear bits in the local doorbell mask
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to clear.
 *
 * Clear bits in the local doorbell mask register, allowing doorbell interrupts
 * to be generated for those doorbell bits. If a doorbell bit is already set
 * at the time the mask is cleared, and the corresponding mask bit is changed
 * from set to clear, then the ntb driver must ensure that ntb_db_event() is
 * called. If the hardware does not generate the interrupt on clearing the
 * mask bit, then the driver must call ntb_db_event() anyway.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	return ntb->ops->db_clear_mask(ntb, db_bits);
}
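
/*
 * Usage sketch: a common pattern for servicing doorbells from the db_event
 * callback is to mask, read-and-clear, handle, and then unmask, so that no
 * doorbell bit is lost between the read and the clear. Names are
 * illustrative.
 *
 *	static void my_db_event(void *ctx, int vector)
 *	{
 *		struct my_ctx *mc = ctx;
 *		struct ntb_dev *ntb = mc->ntb;
 *		u64 vec_mask, db_bits;
 *
 *		vec_mask = ntb_db_vector_mask(ntb, vector);
 *		ntb_db_set_mask(ntb, vec_mask);
 *
 *		db_bits = vec_mask & ntb_db_read(ntb);
 *		ntb_db_clear(ntb, db_bits);
 *		my_handle_bits(mc, db_bits);	// hypothetical helper
 *
 *		ntb_db_clear_mask(ntb, vec_mask);
 *	}
 */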

/**
 * ntb_peer_db_addr() - address and size of the peer doorbell register
 * @ntb: NTB device context.
 * @db_addr: OUT - The address of the peer doorbell register.
 * @db_size: OUT - The number of bytes to write the peer doorbell register.
 *
 * Return the address of the peer doorbell register. This may be used, for
 * example, by drivers that offload memory copy operations to a dma engine.
 * The drivers may wish to ring the peer doorbell at the completion of memory
 * copy operations. For efficiency, and to simplify ordering of operations
 * between the dma memory copies and the ringing doorbell, the driver may
 * append one additional dma memory copy with the doorbell register as the
 * destination, after the memory copy operations.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
				   phys_addr_t *db_addr,
				   resource_size_t *db_size)
{
	if (!ntb->ops->peer_db_addr)
		return -EINVAL;

	return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
}

/**
 * ntb_peer_db_read() - read the peer doorbell register
 * @ntb: NTB device context.
 *
 * Read the peer doorbell register, and return the bits that are set.
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: The bits currently set in the peer doorbell register.
 */
static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
{
	if (!ntb->ops->peer_db_read)
		return 0;

	return ntb->ops->peer_db_read(ntb);
}

/**
 * ntb_peer_db_set() - set bits in the peer doorbell register
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to set.
 *
 * Set bits in the peer doorbell register, which may generate a peer doorbell
 * interrupt. Bits that were already set must remain set.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	return ntb->ops->peer_db_set(ntb, db_bits);
}

/**
 * ntb_peer_db_clear() - clear bits in the peer doorbell register
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to clear.
 *
 * Clear bits in the peer doorbell register, arming the bits for the next
 * doorbell.
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	if (!ntb->ops->peer_db_clear)
		return -EINVAL;

	return ntb->ops->peer_db_clear(ntb, db_bits);
}

/**
 * ntb_peer_db_read_mask() - read the peer doorbell mask
 * @ntb: NTB device context.
 *
 * Read the peer doorbell mask register, and return the bits that are set.
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: The bits currently set in the peer doorbell mask register.
 */
static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
{
	if (!ntb->ops->peer_db_read_mask)
		return 0;

	return ntb->ops->peer_db_read_mask(ntb);
}

/**
 * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
 * @ntb: NTB device context.
 * @db_bits: Doorbell mask bits to set.
 *
 * Set bits in the peer doorbell mask register, preventing doorbell interrupts
 * from being generated for those doorbell bits. Bits that were already set
 * must remain set.
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	if (!ntb->ops->peer_db_set_mask)
		return -EINVAL;

	return ntb->ops->peer_db_set_mask(ntb, db_bits);
}

/**
 * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
 * @ntb: NTB device context.
 * @db_bits: Doorbell bits to clear.
 *
 * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
 * to be generated for those doorbell bits. If the hardware does not generate
 * the interrupt on clearing the mask bit, then the driver should not
 * implement this function!
 *
 * This is unusual, and hardware may not support it.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	if (!ntb->ops->peer_db_clear_mask)
		return -EINVAL;

	return ntb->ops->peer_db_clear_mask(ntb, db_bits);
}

/**
 * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
 * @ntb: NTB device context.
 *
 * It is possible for some ntb hardware to be affected by errata. Hardware
 * drivers can advise clients to avoid using scratchpads. Clients may ignore
 * this advice, though caution is recommended.
 *
 * Return: Zero if it is safe to use scratchpads, or one if it is not safe.
 */
static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	if (!ntb->ops->spad_is_unsafe)
		return 0;

	return ntb->ops->spad_is_unsafe(ntb);
}

/**
 * ntb_spad_count() - get the number of scratchpads
 * @ntb: NTB device context.
 *
 * Hardware and topology may support a different number of scratchpads,
 * although it must be the same for all ports of one NTB device.
 *
 * Return: the number of scratchpads.
 */
static inline int ntb_spad_count(struct ntb_dev *ntb)
{
	if (!ntb->ops->spad_count)
		return 0;

	return ntb->ops->spad_count(ntb);
}

/**
 * ntb_spad_read() - read the local scratchpad register
 * @ntb: NTB device context.
 * @sidx: Scratchpad index.
 *
 * Read the local scratchpad register, and return the value.
 *
 * Return: The value of the local scratchpad register.
 */
static inline u32 ntb_spad_read(struct ntb_dev *ntb, int sidx)
{
	if (!ntb->ops->spad_read)
		return ~(u32)0;

	return ntb->ops->spad_read(ntb, sidx);
}

/**
 * ntb_spad_write() - write the local scratchpad register
 * @ntb: NTB device context.
 * @sidx: Scratchpad index.
 * @val: Scratchpad value.
 *
 * Write the value to the local scratchpad register.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
{
	if (!ntb->ops->spad_write)
		return -EINVAL;

	return ntb->ops->spad_write(ntb, sidx, val);
}

/**
 * ntb_peer_spad_addr() - address of the peer scratchpad register
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @sidx: Scratchpad index.
 * @spad_addr: OUT - The address of the peer scratchpad register.
 *
 * Return the address of the peer scratchpad register. This may be used, for
 * example, by drivers that offload memory copy operations to a dma engine.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
				     phys_addr_t *spad_addr)
{
	if (!ntb->ops->peer_spad_addr)
		return -EINVAL;

	return ntb->ops->peer_spad_addr(ntb, pidx, sidx, spad_addr);
}

/**
 * ntb_peer_spad_read() - read the peer scratchpad register
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @sidx: Scratchpad index.
 *
 * Read the peer scratchpad register, and return the value.
 *
 * Return: The value of the peer scratchpad register.
 */
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	if (!ntb->ops->peer_spad_read)
		return ~(u32)0;

	return ntb->ops->peer_spad_read(ntb, pidx, sidx);
}

/**
 * ntb_peer_spad_write() - write the peer scratchpad register
 * @ntb: NTB device context.
 * @pidx: Port index of peer device.
 * @sidx: Scratchpad index.
 * @val: Scratchpad value.
 *
 * Write the value to the peer scratchpad register.
 *
 * Return: Zero on success, otherwise an error number.
 */
static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
				      u32 val)
{
	if (!ntb->ops->peer_spad_write)
		return -EINVAL;

	return ntb->ops->peer_spad_write(ntb, pidx, sidx, val);
}
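
/*
 * Usage sketch: scratchpads are commonly used for a simple handshake after a
 * link-up event. Each side publishes a value in the peer's scratchpad and
 * reads its own local copy. The layout below (index 0 carrying a
 * hypothetical MY_PROTO_VERSION) is only an illustration.
 *
 *	// local side advertises its version to the peer
 *	ntb_peer_spad_write(ntb, NTB_DEF_PEER_IDX, 0, MY_PROTO_VERSION);
 *	ntb_peer_db_set(ntb, BIT_ULL(0));
 *
 *	// and, when its own doorbell fires, checks what the peer wrote
 *	u32 peer_version = ntb_spad_read(ntb, 0);
 *
 *	if (peer_version != MY_PROTO_VERSION)
 *		dev_warn(&ntb->dev, "protocol mismatch %u\n", peer_version);
 */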

/**
 * ntb_msg_count() - get the number of message registers
 * @ntb: NTB device context.
 *
 * Hardware may support a different number of message registers.
 *
 * Return: the number of message registers.
 */
static inline int ntb_msg_count(struct ntb_dev *ntb)
{
	if (!ntb->ops->msg_count)
		return 0;

	return ntb->ops->msg_count(ntb);
}

/**
 * ntb_msg_inbits() - get a bitfield of inbound message registers status
 * @ntb: NTB device context.
 *
 * The method returns the bits of the status and mask registers that relate
 * to the inbound message registers.
 *
 * Return: bitfield of inbound message registers.
 */
static inline u64 ntb_msg_inbits(struct ntb_dev *ntb)
{
	if (!ntb->ops->msg_inbits)
		return 0;

	return ntb->ops->msg_inbits(ntb);
}

/**
 * ntb_msg_outbits() - get a bitfield of outbound message registers status
 * @ntb: NTB device context.
 *
 * The method returns the bits of the status and mask registers that relate
 * to the outbound message registers.
 *
 * Return: bitfield of outbound message registers.
 */
static inline u64 ntb_msg_outbits(struct ntb_dev *ntb)
{
	if (!ntb->ops->msg_outbits)
		return 0;

	return ntb->ops->msg_outbits(ntb);
}

/**
 * ntb_msg_read_sts() - read the message registers status
 * @ntb: NTB device context.
 *
 * Read the status of the message registers. The bits related to inbound and
 * outbound message registers can be filtered by the masks retrieved from
 * ntb_msg_inbits() and ntb_msg_outbits().
 *
 * Return: status bits of message registers
 */
static inline u64 ntb_msg_read_sts(struct ntb_dev *ntb)
{
	if (!ntb->ops->msg_read_sts)
		return 0;

	return ntb->ops->msg_read_sts(ntb);
}

/**
 * ntb_msg_clear_sts() - clear status bits of message registers
 * @ntb: NTB device context.
 * @sts_bits: Status bits to clear.
 *
 * Clear bits in the status register.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
{
	if (!ntb->ops->msg_clear_sts)
		return -EINVAL;

	return ntb->ops->msg_clear_sts(ntb, sts_bits);
}

/**
 * ntb_msg_set_mask() - set mask of message register status bits
 * @ntb: NTB device context.
 * @mask_bits: Mask bits.
 *
 * Mask the message register status bits so they do not raise the message
 * event.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
{
	if (!ntb->ops->msg_set_mask)
		return -EINVAL;

	return ntb->ops->msg_set_mask(ntb, mask_bits);
}

/**
 * ntb_msg_clear_mask() - clear message registers mask
 * @ntb: NTB device context.
 * @mask_bits: Mask bits to clear.
 *
 * Clear bits in the message events mask register.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
{
	if (!ntb->ops->msg_clear_mask)
		return -EINVAL;

	return ntb->ops->msg_clear_mask(ntb, mask_bits);
}

/**
 * ntb_msg_read() - read inbound message register with specified index
 * @ntb: NTB device context.
 * @pidx: OUT - Port index of the peer device the message was retrieved from.
 * @midx: Message register index.
 *
 * Read data from the specified message register. The source port index of
 * the message is retrieved as well.
 *
 * Return: The value of the inbound message register.
 */
static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
	if (!ntb->ops->msg_read)
		return ~(u32)0;

	return ntb->ops->msg_read(ntb, pidx, midx);
}

/**
 * ntb_peer_msg_write() - write data to the specified peer message register
 * @ntb: NTB device context.
 * @pidx: Port index of the peer device the message is being sent to.
 * @midx: Message register index.
 * @msg: Data to send.
 *
 * Send data to the specified peer device using the defined message register.
 * A message event can be raised if the midx register isn't empty while this
 * method is called and the corresponding interrupt isn't masked.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
				     u32 msg)
{
	if (!ntb->ops->peer_msg_write)
		return -EINVAL;

	return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
}
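
/*
 * Usage sketch: a bare-bones message exchange, sending from one side and
 * consuming from the msg_event callback on the other. Register index 0 is
 * chosen arbitrarily, and real status handling (e.g. detecting delivery
 * failures through the outbound bits) is hardware specific.
 *
 *	// sender
 *	rc = ntb_peer_msg_write(ntb, NTB_DEF_PEER_IDX, 0, cmd);
 *
 *	// receiver, in the msg_event handler
 *	u64 sts = ntb_msg_read_sts(ntb);
 *
 *	if (sts & ntb_msg_inbits(ntb)) {
 *		int src_pidx;
 *		u32 data = ntb_msg_read(ntb, &src_pidx, 0);
 *
 *		my_handle_msg(src_pidx, data);	// hypothetical helper
 *	}
 *	ntb_msg_clear_sts(ntb, sts);
 */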

#endif