1 /*
2 * Microsemi Switchtec(tm) PCIe Management Driver
3 * Copyright (c) 2017, Microsemi Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16 #include <linux/switchtec.h>
17 #include <linux/module.h>
18 #include <linux/delay.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/ntb.h>
22 #include <linux/pci.h>
23
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Cap on the size of a direct memory window advertised to NTB clients */
static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
	"Max memory window size reported to the upper layer");

/* When set, hardware LUT windows are also exposed as NTB memory windows */
static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
	"Enable the use of the LUT based memory windows");
38
/*
 * Fallback for architectures without a native ioread64: compose the
 * value from two 32-bit MMIO reads, low word first.  Not atomic with
 * respect to the device.
 */
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
54
/*
 * Fallback for architectures without a native iowrite64: issue two
 * 32-bit MMIO writes, low word first.  Not atomic with respect to the
 * device.
 */
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
67
/* Protocol signature placed in shared_mw.magic */
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128

/*
 * Layout of the DMA-coherent page each side exports to its peer through
 * a reserved LUT window.  The peer reads it via peer_shared; link
 * detection reads magic and link_sta together as one 64-bit value.
 */
struct shared_mw {
	u32 magic;		/* must be SWITCHTEC_NTB_MAGIC */
	u32 link_sta;		/* nonzero when this side's link is enabled */
	u32 partition_id;
	u64 mw_sizes[MAX_MWS];	/* sizes this side advertises for each MW */
	u32 spad[128];		/* scratchpad registers emulated in memory */
};

#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K
81
/* Per-device state for one Switchtec NTB link */
struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	int self_partition;	/* our NT partition number */
	int peer_partition;	/* peer's NT partition number */

	int doorbell_irq;
	int message_irq;

	/* Register regions within the management BAR */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	void __iomem *mmio_xlink_win;

	/* Shared page: ours (CPU-visible) and the peer's (via LUT window) */
	struct shared_mw *self_shared;
	struct shared_mw __iomem *peer_shared;
	dma_addr_t self_shared_dma;

	u64 db_mask;		/* cached copy of the doorbell mask register */
	u64 db_valid_mask;	/* doorbell bits usable by the client */
	int db_shift;		/* our doorbells' offset within the idb reg */
	int db_peer_shift;	/* peer doorbells' offset within the odb reg */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;	/* LUTs held back for internal use */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct work_struct link_reinit_work;
};
128
/* Convert the generic ntb_dev embedded in our state back to the owner */
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}
133
/*
 * Issue a partition operation (lock/configure/reset) on @ctl and poll
 * until the hardware leaves the corresponding transitional status.
 * Polls in 50ms steps for up to ~50s; a pending signal aborts the wait
 * and issues a best-effort reset.  Returns 0 when the final status is
 * @wait_status, -ETIMEDOUT if the hardware is still transitioning,
 * -EINTR if interrupted, and -EIO for any other final status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	/* Map the op to the transient status it produces while running */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* Interrupted: try to abort the pending operation */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* Only the low 16 bits carry the partition status */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}
190
/*
 * Write @val into the peer's outgoing-message register @idx, raising a
 * message interrupt on the peer.  Returns -EINVAL for an out-of-range
 * index.
 */
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
				  u32 val)
{
	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
		return -EINVAL;

	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);

	return 0;
}
201
switchtec_ntb_mw_count(struct ntb_dev * ntb,int pidx)202 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
203 {
204 struct switchtec_ntb *sndev = ntb_sndev(ntb);
205 int nr_direct_mw = sndev->peer_nr_direct_mw;
206 int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
207
208 if (pidx != NTB_DEF_PEER_IDX)
209 return -EINVAL;
210
211 if (!use_lut_mws)
212 nr_lut_mw = 0;
213
214 return nr_direct_mw + nr_lut_mw;
215 }
216
/*
 * Translate a client-visible MW index into our LUT table index: skip
 * past the direct windows, then offset by the reserved LUT entries.
 */
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}
221
/*
 * Translate a client-visible MW index into the peer's LUT table index,
 * accounting for the peer's direct windows and our reserved LUTs.
 */
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}
226
/*
 * Report alignment constraints for memory window @widx.  Direct
 * windows need 4K alignment; LUT windows must be aligned to (and sized
 * exactly as) the size the peer advertised in its shared page.
 */
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
				      int widx, resource_size_t *addr_align,
				      resource_size_t *size_align,
				      resource_size_t *size_max)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	int lut;
	resource_size_t size;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* Indices past the peer's direct windows refer to LUT windows */
	lut = widx >= sndev->peer_nr_direct_mw;
	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);

	if (size == 0)
		return -EINVAL;

	if (addr_align)
		*addr_align = lut ? size : SZ_4K;

	if (size_align)
		*size_align = lut ? size : SZ_4K;

	if (size_max)
		*size_max = size;

	return 0;
}
256
/*
 * Disable direct memory window @idx in the peer's control registers and
 * point its translation back at our own partition with a zero address.
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	/* xlate_addr encodes the destination partition in its low bits */
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
269
/* Disable the peer's LUT entry backing memory window @idx */
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
276
/*
 * Program direct memory window @idx in the peer's control registers to
 * translate to @addr in our partition.  @size must be a power of two
 * (enforced by the caller); the window register packs the log2 size
 * with the size value, and xlate_addr packs our partition number into
 * the low bits of the (size-aligned) address.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
293
/*
 * Program the peer's LUT entry for memory window @idx: enable bit,
 * destination partition (shifted into bit 1+), and the target address.
 * @size is implied by the fixed LUT_SIZE and is unused here.
 */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
302
/*
 * Set (or, with addr/size of 0, clear) the translation for memory
 * window @widx.  The peer's partition config is locked around the
 * register updates; on a hardware -EIO during the configure step the
 * window is cleared again and the partition returned to normal.
 * Returns 0 on success or a negative errno.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* Hardware requires at least 4K (2^12) windows */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CONFIG_CMA_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* addr/size of zero means tear the window down */
	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* Undo the bad window and retry the configure to recover */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}
373
switchtec_ntb_peer_mw_count(struct ntb_dev * ntb)374 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
375 {
376 struct switchtec_ntb *sndev = ntb_sndev(ntb);
377 int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
378
379 return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
380 }
381
/*
 * Report the physical address and usable size of direct window @idx.
 * Window 0 shares its BAR with the LUT entries, so its usable region
 * starts after all LUT slots and is capped to that LUT area's size.
 * All sizes are further capped by the max_mw_size module parameter.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
416
/*
 * Report the physical address of LUT window @idx.  LUT windows live at
 * the start of direct BAR 0, one fixed LUT_SIZE slot per entry.
 */
static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
				      int idx, phys_addr_t *base,
				      resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[0];
	int offset;

	offset = LUT_SIZE * lut_index(sndev, idx);

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size)
		*size = LUT_SIZE;

	return 0;
}
434
/*
 * Dispatch an outbound window address query: indices below
 * nr_direct_mw are direct windows, the rest (up to the total count)
 * are LUT windows.
 */
static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
					  phys_addr_t *base,
					  resource_size_t *size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < sndev->nr_direct_mw)
		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);

	if (idx < switchtec_ntb_peer_mw_count(ntb))
		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);

	return -EINVAL;
}
448
/*
 * Read the negotiated PCIe speed/width for @partition's virtual EP by
 * following its PFF instance to the link status register.
 * NOTE(review): the 16/20-bit shifts extract the Link Status fields of
 * the PCIe capability at dword 13 — assumed fixed by the hardware.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}
465
/*
 * Cache the effective link speed/width: the minimum of what each side
 * negotiated, or NONE when the logical link is down.
 */
static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
{
	enum ntb_speed self_speed, peer_speed;
	enum ntb_width self_width, peer_width;

	if (!sndev->link_is_up) {
		sndev->link_speed = NTB_SPEED_NONE;
		sndev->link_width = NTB_WIDTH_NONE;
		return;
	}

	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
				      &self_speed, &self_width);
	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
				      &peer_speed, &peer_width);

	/* The slower/narrower side limits the usable link */
	sndev->link_speed = min(self_speed, peer_speed);
	sndev->link_width = min(self_width, peer_width);
}
485
/* Nonzero when the peer partition is reached via a crosslink port */
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}
492
/*
 * Crosslink only: route each of the peer's incoming message registers
 * back to our partition (one byte of msg_map per register) and unmask
 * our doorbell range in the peer's outgoing doorbell mask.
 */
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		/* Each byte: message index plus source partition in bits 2+ */
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}
511
/* Message-register protocol used to coordinate link state with the peer */
enum switchtec_msg {
	LINK_MESSAGE = 0,	/* message register index used for link msgs */
	MSG_LINK_UP = 1,	/* peer enabled its side of the link */
	MSG_LINK_DOWN = 2,	/* peer disabled its side of the link */
	MSG_CHECK_LINK = 3,	/* peer requests we re-evaluate link state */
	MSG_LINK_FORCE_DOWN = 4, /* peer is resetting; reinit required */
};
519
520 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
521
/* Process-context worker: re-initialize the peer after a forced down */
static void link_reinit_work(struct work_struct *work)
{
	struct switchtec_ntb *sndev;

	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);

	switchtec_ntb_reinit_peer(sndev);
}
530
/*
 * Re-evaluate the logical link state.  The link is up only when our
 * side is enabled (self_shared->link_sta) AND the peer's shared page
 * carries the expected magic with its own link_sta set (both read in
 * one 64-bit access of magic+link_sta).  On any change, notify the NTB
 * client, ask the peer to re-check, and re-arm crosslink doorbells when
 * coming up.  MSG_LINK_FORCE_DOWN instead forces the link down and
 * schedules a full peer re-init from process context.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/* low 32 bits: magic; high 32 bits: peer's link_sta */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}
572
/* Callback from the switchtec core when the physical link state changes */
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}
579
switchtec_ntb_link_is_up(struct ntb_dev * ntb,enum ntb_speed * speed,enum ntb_width * width)580 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
581 enum ntb_speed *speed,
582 enum ntb_width *width)
583 {
584 struct switchtec_ntb *sndev = ntb_sndev(ntb);
585
586 if (speed)
587 *speed = sndev->link_speed;
588 if (width)
589 *width = sndev->link_width;
590
591 return sndev->link_is_up;
592 }
593
/*
 * Enable our side of the link: publish link_sta in the shared page,
 * tell the peer, then re-evaluate the combined link state.  The
 * max_speed/max_width hints are not supported by this hardware.
 */
static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
				     enum ntb_speed max_speed,
				     enum ntb_width max_width)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "enabling link\n");

	sndev->self_shared->link_sta = 1;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}
609
/*
 * Disable our side of the link: clear link_sta in the shared page,
 * tell the peer, then re-evaluate the combined link state.
 */
static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	dev_dbg(&sndev->stdev->dev, "disabling link\n");

	sndev->self_shared->link_sta = 0;
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);

	return 0;
}
623
/* Doorbell bits available to the client (computed at init) */
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return sndev->db_valid_mask;
}
630
/* All doorbells share a single interrupt vector on this hardware */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
635
switchtec_ntb_db_vector_mask(struct ntb_dev * ntb,int db_vector)636 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
637 {
638 struct switchtec_ntb *sndev = ntb_sndev(ntb);
639
640 if (db_vector < 0 || db_vector > 1)
641 return 0;
642
643 return sndev->db_valid_mask;
644 }
645
/* Read the pending incoming doorbells, normalized to bit 0 */
static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
{
	u64 ret;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;

	return ret & sndev->db_valid_mask;
}
655
/* Clear the given doorbell bits (idb is write-one-to-clear) */
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}
664
/*
 * Mask (disable interrupts for) the given doorbell bits.  The cached
 * db_mask and the hardware register are updated together under
 * db_mask_lock; the hardware register is inverted (it is an enable
 * mask, our cache tracks disabled bits).
 */
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
682
/*
 * Unmask (re-enable interrupts for) the given doorbell bits; mirror of
 * switchtec_ntb_db_set_mask with the same locking and inversion rules.
 */
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
700
switchtec_ntb_db_read_mask(struct ntb_dev * ntb)701 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
702 {
703 struct switchtec_ntb *sndev = ntb_sndev(ntb);
704
705 return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
706 }
707
/*
 * Report the physical address of the peer's outgoing doorbell register
 * so clients can ring doorbells with direct writes (e.g. via DMA).
 * The address is the odb register's offset within our mapped BAR 0,
 * adjusted by the byte offset of our doorbell range.
 */
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
				      phys_addr_t *db_addr,
				      resource_size_t *db_size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
		(unsigned long)sndev->stdev->mmio;

	/* db_shift is in bits; convert to a byte offset into the register */
	offset += sndev->db_shift / 8;

	if (db_addr)
		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
	if (db_size)
		*db_size = sizeof(u32);

	return 0;
}
727
/* Ring the peer's doorbells, shifted into the peer's doorbell range */
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb);

	return 0;
}
737
/* Number of scratchpads (emulated in the shared memory page) */
static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return ARRAY_SIZE(sndev->self_shared->spad);
}
744
switchtec_ntb_spad_read(struct ntb_dev * ntb,int idx)745 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
746 {
747 struct switchtec_ntb *sndev = ntb_sndev(ntb);
748
749 if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
750 return 0;
751
752 if (!sndev->self_shared)
753 return 0;
754
755 return sndev->self_shared->spad[idx];
756 }
757
/*
 * Write local scratchpad @idx in the shared page.  Returns -EINVAL for
 * a bad index, -EIO when the shared page is not allocated (order of
 * the checks determines which errno wins when both apply).
 */
static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
		return -EINVAL;

	if (!sndev->self_shared)
		return -EIO;

	sndev->self_shared->spad[idx] = val;

	return 0;
}
772
/*
 * Read scratchpad @sidx from the peer's shared page (an MMIO read
 * through the reserved LUT window).  Returns 0 for a bad index or a
 * missing mapping; -EINVAL (as u32) for a bad peer index.
 */
static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
					int sidx)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return 0;

	if (!sndev->peer_shared)
		return 0;

	return ioread32(&sndev->peer_shared->spad[sidx]);
}
789
/*
 * Write scratchpad @sidx in the peer's shared page (an MMIO write
 * through the reserved LUT window).  Returns -EINVAL for bad indices,
 * -EIO when the peer page is not mapped.
 */
static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
					 int sidx, u32 val)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;

	if (!sndev->peer_shared)
		return -EIO;

	iowrite32(val, &sndev->peer_shared->spad[sidx]);

	return 0;
}
808
/*
 * Report the physical address of peer scratchpad @sidx so clients can
 * write it directly (e.g. via DMA).  The address is the scratchpad's
 * offset within our mapped BAR 0.
 *
 * Fix: validate @sidx like peer_spad_read/peer_spad_write do; the
 * original would happily compute an address beyond the spad array for
 * an out-of-range index.
 */
static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
					int sidx, phys_addr_t *spad_addr)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	unsigned long offset;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* Same bounds check as the peer spad read/write accessors */
	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
		return -EINVAL;

	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
		(unsigned long)sndev->stdev->mmio;

	if (spad_addr)
		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;

	return 0;
}
826
/* NTB API operations exported to the common NTB core */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
853
switchtec_ntb_init_sndev(struct switchtec_ntb * sndev)854 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
855 {
856 u64 tpart_vec;
857 int self;
858 u64 part_map;
859 int bit;
860
861 sndev->ntb.pdev = sndev->stdev->pdev;
862 sndev->ntb.topo = NTB_TOPO_SWITCH;
863 sndev->ntb.ops = &switchtec_ntb_ops;
864
865 INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
866
867 sndev->self_partition = sndev->stdev->partition;
868
869 sndev->mmio_ntb = sndev->stdev->mmio_ntb;
870
871 self = sndev->self_partition;
872 tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
873 tpart_vec <<= 32;
874 tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
875
876 part_map = ioread64(&sndev->mmio_ntb->ep_map);
877 part_map &= ~(1 << sndev->self_partition);
878
879 if (!ffs(tpart_vec)) {
880 if (sndev->stdev->partition_count != 2) {
881 dev_err(&sndev->stdev->dev,
882 "ntb target partition not defined\n");
883 return -ENODEV;
884 }
885
886 bit = ffs(part_map);
887 if (!bit) {
888 dev_err(&sndev->stdev->dev,
889 "peer partition is not NT partition\n");
890 return -ENODEV;
891 }
892
893 sndev->peer_partition = bit - 1;
894 } else {
895 if (ffs(tpart_vec) != fls(tpart_vec)) {
896 dev_err(&sndev->stdev->dev,
897 "ntb driver only supports 1 pair of 1-1 ntb mapping\n");
898 return -ENODEV;
899 }
900
901 sndev->peer_partition = ffs(tpart_vec) - 1;
902 if (!(part_map & (1ULL << sndev->peer_partition))) {
903 dev_err(&sndev->stdev->dev,
904 "ntb target partition is not NT partition\n");
905 return -ENODEV;
906 }
907 }
908
909 dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
910 sndev->self_partition, sndev->stdev->partition_count);
911
912 sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
913 SWITCHTEC_NTB_REG_CTRL_OFFSET;
914 sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
915 SWITCHTEC_NTB_REG_DBMSG_OFFSET;
916
917 sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
918 sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
919 sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
920 sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
921
922 return 0;
923 }
924
/*
 * Program reserved LUT entry @lut_idx in @ctl to map to @addr in
 * @partition, enabling LUT windows on the BAR shared with direct
 * window 0.  Used to expose the shared memory page.  The partition
 * config is locked around the update; returns 0 or a negative errno.
 */
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* Keep the low control bits; set LUT enable, size and count fields */
	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
963
/*
 * Program @count requester IDs into the partition's requester ID
 * translation table with ID protection disabled.  The partition config
 * is locked around the update.  Returns 0 on success or a negative
 * errno.
 *
 * Fix: propagate the result of the final configure operation — the
 * original returned 0 unconditionally, silently swallowing -EIO (and
 * -ETIMEDOUT/-EINTR) so callers proceeded with a broken table.
 */
static int config_req_id_table(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
			       int *req_ids, int count)
{
	int i, rc = 0;
	u32 error;
	u32 proxy_id;

	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
		dev_err(&sndev->stdev->dev,
			"Not enough requester IDs available.\n");
		return -EFAULT;
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
		  &mmio_ctrl->partition_ctrl);

	for (i = 0; i < count; i++) {
		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
			  &mmio_ctrl->req_id_table[i]);

		/* Read back the proxy ID the hardware assigned */
		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
		dev_dbg(&sndev->stdev->dev,
			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
			(proxy_id >> 1) & 0x7);
	}

	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
				   NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		error = ioread32(&mmio_ctrl->req_id_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up the requester ID table: %08x\n",
			error);
	}

	return rc;
}
1012
/*
 * Crosslink only: program our own LUT and direct windows to forward
 * into the peer partition across the link.  @ntb_lut_idx is the
 * reserved entry already used for the shared page and is skipped.
 * The self partition config is locked around the update.
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* LUT windows: consecutive LUT_SIZE slices of the first address */
	for (i = 0; i < sndev->nr_lut_mw; i++) {
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		/* Window 0 shares its BAR with the LUT area */
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* NOTE(review): xlate_pos is taken before the clamp below —
		 * presumably intentional (full-BAR alignment); confirm. */
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
1076
/*
 * Copy the enabled requester IDs from our own table into the given
 * (peer) partition's requester ID table.
 */
static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
				   struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
	int ids[16];
	int count = 0;
	u32 entry;

	while (count < ARRAY_SIZE(ids)) {
		entry = ioread32(&sndev->mmio_self_ctrl->req_id_table[count]);

		/* The table ends at the first disabled entry */
		if (!(entry & NTB_CTRL_REQ_ID_EN))
			break;

		ids[count++] = (entry >> 1) & 0xFF;
	}

	return config_req_id_table(sndev, mmio_ctrl, ids, count);
}
1095
1096 /*
1097 * In crosslink configuration there is a virtual partition in the
1098 * middle of the two switches. The BARs in this partition have to be
1099 * enumerated and assigned addresses.
1100 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;	/* 64GB region per BAR */
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* Enable memory decoding and bus mastering (PCI command MEM|MASTER) */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		/* Assign each 64-bit BAR its own 64GB aligned region */
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		bar_addr &= ~0xf;	/* mask off the BAR flag bits */

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i*2, bar_addr);

		/* Skip BARs that did not latch the address we wrote */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	/* Return the number of usable BARs recorded in bar_addrs */
	return bar_cnt;
}
1133
/*
 * Set up the crosslink configuration, if enabled: enumerate the BARs of
 * the virtual partition between the two switches, map the peer's dbmsg
 * registers through a reserved LUT entry and program the remaining BARs
 * as memory windows.
 *
 * Returns 0 on success (or when crosslink is not enabled), -EINVAL if
 * not enough BARs could be enumerated, or a negative errno from any of
 * the setup steps.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/*
	 * Locate the peer partition's dbmsg registers inside the first
	 * crosslink BAR and align the address down to a LUT window
	 * boundary; the remainder becomes an offset into that window.
	 */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	/* Map the peer's dbmsg registers through reserved LUT entry 1 */
	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	/* The remaining enumerated BARs become the memory windows */
	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* Map the LUT window (entry 1, hence offset LUT_SIZE) of our BAR */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}
1192
switchtec_ntb_deinit_crosslink(struct switchtec_ntb * sndev)1193 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1194 {
1195 if (sndev->mmio_xlink_win)
1196 pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1197 }
1198
/*
 * Record the indices of all valid BAR entries in 'map' and return how
 * many were found.
 */
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
{
	int cnt = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
		u32 ctl = ioread32(&ctrl->bar_entry[i].ctl);

		if (!(ctl & NTB_CTRL_BAR_VALID))
			continue;

		map[cnt++] = i;
	}

	return cnt;
}
1213
/*
 * Discover the memory window configuration from the hardware: which
 * BARs are usable as direct windows and how many LUT entries are
 * available, for both the local and the peer partition.
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	/* Round down to a power of two; presumably the reported count
	 * need not be one -- TODO confirm against hardware docs.
	 */
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}
1236
1237 /*
1238 * There are 64 doorbells in the switch hardware but this is
1239 * shared among all partitions. So we must split them in half
1240 * (32 for each partition). However, the message interrupts are
1241 * also shared with the top 4 doorbells so we just limit this to
1242 * 28 doorbells per partition.
1243 *
 * In crosslink mode, each side has its own dbmsg register so
1245 * they can each use all 60 of the available doorbells.
1246 */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	/* Bits 60-63 stay masked; they are shared with the message irqs */
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
		/* Crosslink: separate dbmsg registers per side, so all
		 * 60 doorbells are ours with no shift.
		 */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = sndev->db_mask;
	} else if (sndev->self_partition < sndev->peer_partition) {
		/* Shared register: the lower-numbered partition takes
		 * the bottom 28 doorbells, the peer the top half.
		 */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
		sndev->db_valid_mask = 0x0FFFFFFF;
	} else {
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = 0x0FFFFFFF;
	}

	/* Unmask our inbound doorbells and the peer-directed outbound ones */
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);

	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
1272
/*
 * Configure the incoming message registers: route each one to the
 * corresponding message from the peer partition, then clear and mask
 * them all.
 */
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	/*
	 * One byte of msg_map per incoming message register: the source
	 * message number goes in the low two bits, the source partition
	 * in the bits above it.
	 */
	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		int m = i | sndev->peer_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);

	/* Writing STATUS clears any stale message; MASK disables the irq */
	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
			  &sndev->mmio_self_dbmsg->imsg[i]);
}
1290
/*
 * Populate our partition's requester ID table with the two IDs that
 * originate traffic on this host: the root complex and the host bridge.
 */
static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
	int req_ids[2];

	/*
	 * Root Complex Requester ID (which is 0:00.0)
	 */
	req_ids[0] = 0;

	/*
	 * Host Bridge Requester ID (as read from the mmap address)
	 */
	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);

	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
				   ARRAY_SIZE(req_ids));
}
1309
/*
 * Fill in the shared memory page the peer will read: the magic value
 * (which includes a version), our partition ID and the sizes of the
 * memory windows we expose (direct windows first, then LUT windows).
 */
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
{
	int i;

	memset(sndev->self_shared, 0, LUT_SIZE);
	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
	sndev->self_shared->partition_id = sndev->stdev->partition;

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		int bar = sndev->direct_mw_to_bar[i];
		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);

		/* The first BAR's usable size is capped by the region
		 * set aside for the LUT windows at its start.
		 */
		if (i == 0)
			sz = min_t(resource_size_t, sz,
				   LUT_SIZE * sndev->nr_lut_mw);

		sndev->self_shared->mw_sizes[i] = sz;
	}

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		int idx = sndev->nr_direct_mw + i;

		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
	}
}
1335
/*
 * Allocate our shared memory page, publish it to the peer through
 * reserved LUT entry 0 in the peer's control registers, and map the
 * peer's shared page through our first direct BAR.
 *
 * Returns 0 on success or a negative errno; on failure the DMA buffer
 * is freed again.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	/* LUT entry 0 is reserved for the shared window; paired with the
	 * decrement in switchtec_ntb_deinit_shared_mw().
	 */
	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	/* Point the peer's reserved LUT 0 at our shared page */
	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}
1375
/*
 * Undo switchtec_ntb_init_shared_mw(): unmap the peer's shared page,
 * free our DMA buffer and release the reserved LUT entry. Safe to call
 * even if init only partially succeeded (the pointers are checked).
 */
static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
	if (sndev->peer_shared)
		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

	if (sndev->self_shared)
		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
				  sndev->self_shared,
				  sndev->self_shared_dma);
	/* Balances the increment done unconditionally in init */
	sndev->nr_rsvd_luts--;
}
1387
switchtec_ntb_doorbell_isr(int irq,void * dev)1388 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1389 {
1390 struct switchtec_ntb *sndev = dev;
1391
1392 dev_dbg(&sndev->stdev->dev, "doorbell\n");
1393
1394 ntb_db_event(&sndev->ntb, 0);
1395
1396 return IRQ_HANDLED;
1397 }
1398
/*
 * Message interrupt: scan every incoming message register, acknowledge
 * each pending message and hand link messages to the link-state code.
 */
static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
{
	int i;
	struct switchtec_ntb *sndev = dev;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);

		if (msg & NTB_DBMSG_IMSG_STATUS) {
			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
				i, (u32)msg);
			/* Writing 1 to the status byte clears the message */
			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);

			if (i == LINK_MESSAGE)
				switchtec_ntb_check_link(sndev, msg);
		}
	}

	return IRQ_HANDLED;
}
1419
/*
 * Choose interrupt vectors for the doorbell and message interrupts,
 * program the in-bound doorbell vector map and request both IRQs.
 *
 * Returns 0 on success or a negative errno from request_irq(); on
 * failure of the second request the first IRQ is freed again.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	/* The event vector number is dictated by the hardware */
	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Pick the lowest vector numbers not already in use */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
		event_irq, doorbell_irq, message_irq);

	/*
	 * Route all but the top four doorbell entries to the doorbell
	 * vector; the top four are shared with the message interrupts
	 * (see the comment above switchtec_ntb_init_db()).
	 */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	/* Translate vector numbers into Linux IRQ numbers */
	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
1467
switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb * sndev)1468 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1469 {
1470 free_irq(sndev->doorbell_irq, sndev);
1471 free_irq(sndev->message_irq, sndev);
1472 }
1473
/*
 * Re-establish the shared memory window after the peer is reported to
 * have reinitialized: tear down the old shared window, re-read the
 * memory window configuration and set the shared window up again.
 */
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
	dev_info(&sndev->stdev->dev, "peer reinitialized\n");
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_init_mw(sndev);
	return switchtec_ntb_init_shared_mw(sndev);
}
1481
/*
 * Class-interface add callback: probe a newly registered switchtec
 * device and, if its PCI class indicates it is operating as an NTB
 * endpoint, initialize and register an NTB device on top of it.
 *
 * Returns 0 on success, -ENODEV when the device is not in NTB mode, or
 * a negative errno from any initialization step (with everything set up
 * so far unwound via the goto chain below).
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only attach to devices whose class code indicates NTB operation */
	if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}
1552
/*
 * Class-interface remove callback: unregister the NTB device and tear
 * everything down in the reverse order of switchtec_ntb_add().
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	/* Nothing to do if switchtec_ntb_add() never attached */
	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}
1571
/*
 * Hook into the switchtec device class so we get add/remove callbacks
 * for every switchtec device the management driver registers.
 */
static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
1576
switchtec_ntb_init(void)1577 static int __init switchtec_ntb_init(void)
1578 {
1579 switchtec_interface.class = switchtec_class;
1580 return class_interface_register(&switchtec_interface);
1581 }
1582 module_init(switchtec_ntb_init);
1583
switchtec_ntb_exit(void)1584 static void __exit switchtec_ntb_exit(void)
1585 {
1586 class_interface_unregister(&switchtec_interface);
1587 }
1588 module_exit(switchtec_ntb_exit);
1589