xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision 52338415)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Microsemi Switchtec(tm) PCIe Management Driver
4  * Copyright (c) 2017, Microsemi Corporation
5  */
6 
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
#include <linux/switchtec.h>
15 
16 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
17 MODULE_VERSION("0.1");
18 MODULE_LICENSE("GPL");
19 MODULE_AUTHOR("Microsemi Corporation");
20 
21 static ulong max_mw_size = SZ_2M;
22 module_param(max_mw_size, ulong, 0644);
23 MODULE_PARM_DESC(max_mw_size,
24 	"Max memory window size reported to the upper layer");
25 
26 static bool use_lut_mws;
27 module_param(use_lut_mws, bool, 0644);
28 MODULE_PARM_DESC(use_lut_mws,
29 		 "Enable the use of the LUT based memory windows");
30 
31 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
32 #define MAX_MWS     128
33 
/*
 * Layout of the small shared-memory region exchanged between the two
 * NT partitions.  Each side allocates one of these with
 * dma_alloc_coherent() and maps it through a reserved LUT entry so the
 * peer can read our link state, advertised window sizes and emulated
 * scratchpad registers.
 */
struct shared_mw {
	u32 magic;		/* SWITCHTEC_NTB_MAGIC when initialized */
	u32 link_sta;		/* nonzero when this side wants the link up */
	u32 partition_id;	/* partition number of the owner */
	u64 mw_sizes[MAX_MWS];	/* size of each advertised memory window */
	u32 spad[128];		/* scratchpads emulated in shared memory */
};
41 
42 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
43 #define LUT_SIZE SZ_64K
44 
/* Per-device driver context wrapping the generic struct ntb_dev. */
struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	/* NT partition numbers for this side and the peer */
	int self_partition;
	int peer_partition;

	int doorbell_irq;
	int message_irq;

	/* Raw register block bases ... */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
	/* ... and the per-partition views into them */
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	/* mapping of the crosslink window (crosslink topology only) */
	void __iomem *mmio_xlink_win;

	/* our shared region (CPU + DMA addr) and the peer's (via MMIO) */
	struct shared_mw *self_shared;
	struct shared_mw __iomem *peer_shared;
	dma_addr_t self_shared_dma;

	u64 db_mask;
	u64 db_valid_mask;
	int db_shift;		/* bit offset of our doorbells in idb */
	int db_peer_shift;	/* bit offset of peer doorbells in odb */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;	/* LUTs reserved for internal use (shared mw) */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	/* cached link state, updated by check_link_status_work */
	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct work_struct check_link_status_work;
	bool link_force_down;	/* force a link-down cycle on next check */
};
92 
/* Convert the generic ntb_dev handle back into our driver context. */
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
{
	return container_of(ntb, struct switchtec_ntb, ntb);
}
97 
/*
 * Issue a partition operation (lock/configure/reset) on the given NTB
 * control register block and poll until the hardware leaves the
 * corresponding transitional state.
 *
 * Returns 0 if the final status equals @wait_status, -EINTR if the
 * poll sleep was interrupted (a reset is issued to back out of the
 * pending op), -ETIMEDOUT if the hardware is still transitioning after
 * ~50 seconds, or -EIO for any other final status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	/* map the requested op to the transitional status it produces */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	/* poll up to 1000 * 50ms for the transitional state to clear */
	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* interrupted: abort the pending op via reset */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* status lives in the low 16 bits of the register */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		/* still in the transitional state after the full poll */
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	/* landed in some unexpected state */
	return -EIO;
}
154 
155 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
156 				  u32 val)
157 {
158 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
159 		return -EINVAL;
160 
161 	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
162 
163 	return 0;
164 }
165 
166 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
167 {
168 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
169 	int nr_direct_mw = sndev->peer_nr_direct_mw;
170 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
171 
172 	if (pidx != NTB_DEF_PEER_IDX)
173 		return -EINVAL;
174 
175 	if (!use_lut_mws)
176 		nr_lut_mw = 0;
177 
178 	return nr_direct_mw + nr_lut_mw;
179 }
180 
/*
 * Translate a memory window index (direct windows first, then LUT
 * windows) into an index into our own LUT table, skipping the
 * reserved LUT entries.
 */
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}
185 
/*
 * Same translation as lut_index(), but into the peer's LUT table
 * using the peer's direct-window count.
 */
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}
190 
191 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
192 				      int widx, resource_size_t *addr_align,
193 				      resource_size_t *size_align,
194 				      resource_size_t *size_max)
195 {
196 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
197 	int lut;
198 	resource_size_t size;
199 
200 	if (pidx != NTB_DEF_PEER_IDX)
201 		return -EINVAL;
202 
203 	lut = widx >= sndev->peer_nr_direct_mw;
204 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
205 
206 	if (size == 0)
207 		return -EINVAL;
208 
209 	if (addr_align)
210 		*addr_align = lut ? size : SZ_4K;
211 
212 	if (size_align)
213 		*size_align = lut ? size : SZ_4K;
214 
215 	if (size_max)
216 		*size_max = size;
217 
218 	return 0;
219 }
220 
/*
 * Disable the peer's direct memory window @idx: clear the window
 * enable bit, zero both halves of the window size, and reset the
 * translation so it targets our own partition at address zero.
 */
static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int bar = sndev->peer_direct_mw_to_bar[idx];
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(0, &ctl->bar_entry[bar].win_size);
	iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
	/* low bits of xlate_addr encode the target partition number */
	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
}
234 
/* Disable the peer's LUT entry backing memory window @idx. */
static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
241 
/*
 * Program the peer's direct memory window @idx to translate to DMA
 * address @addr of size @size in our partition.  The caller has
 * already validated that @size is a power of two >= 4K and that
 * @addr is aligned to it.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	/* win_size: log2(size) in the low bits, size[31:12] above it */
	iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
		  &ctl->bar_entry[bar].win_size);
	/* upper 32 bits of the size go in the extended entry */
	iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
	/* target partition in the low bits, DMA address above */
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
260 
/*
 * Program the peer's LUT entry for memory window @idx to translate to
 * DMA address @addr in our partition (partition number sits in bits
 * above the enable bit).
 */
static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
				     dma_addr_t addr, resource_size_t size)
{
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;

	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
}
269 
/*
 * Set (or, with @size == 0, clear) the translation of memory window
 * @widx so that peer accesses land at DMA address @addr in our
 * partition.
 *
 * The hardware requires a lock/configure bracket around window
 * changes: take NTB_CTRL_PART_OP_LOCK, program the window, then issue
 * NTB_CTRL_PART_OP_CFG to commit.  If the commit reports -EIO the
 * window is cleared and a second commit is attempted to restore a
 * sane state.
 *
 * Returns 0 on success or a negative errno.
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* hardware minimum window size is 4K (2^12) */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CMA_CONFIG_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* windows below nr_direct_mw are direct; the rest are LUTs */
	if (size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* back out the bad window and re-commit */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}
340 
341 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
342 {
343 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
344 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
345 
346 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
347 }
348 
/*
 * Report the physical BAR address and usable size of direct memory
 * window @idx.  Window 0 shares its BAR with the LUT entries, so its
 * start is offset past the LUT region and its size is clamped
 * accordingly.  The size is further capped by the max_mw_size module
 * parameter.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */

		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* window 0 is limited to the LUT region's mirror size */
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
383 
384 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
385 				      int idx, phys_addr_t *base,
386 				      resource_size_t *size)
387 {
388 	int bar = sndev->direct_mw_to_bar[0];
389 	int offset;
390 
391 	offset = LUT_SIZE * lut_index(sndev, idx);
392 
393 	if (base)
394 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
395 
396 	if (size)
397 		*size = LUT_SIZE;
398 
399 	return 0;
400 }
401 
402 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
403 					  phys_addr_t *base,
404 					  resource_size_t *size)
405 {
406 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
407 
408 	if (idx < sndev->nr_direct_mw)
409 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
410 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
411 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
412 	else
413 		return -EINVAL;
414 }
415 
/*
 * Read the negotiated PCIe link speed and width for @partition by
 * decoding the link-status word out of the partition's PFF PCI
 * capability region (dword 13 of the capability area).
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	/* LNKSTA layout: current link speed in [19:16] ... */
	if (speed)
		*speed = (linksta >> 16) & 0xF;

	/* ... and negotiated link width in [25:20] */
	if (width)
		*width = (linksta >> 20) & 0x3F;
}
432 
433 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
434 {
435 	enum ntb_speed self_speed, peer_speed;
436 	enum ntb_width self_width, peer_width;
437 
438 	if (!sndev->link_is_up) {
439 		sndev->link_speed = NTB_SPEED_NONE;
440 		sndev->link_width = NTB_WIDTH_NONE;
441 		return;
442 	}
443 
444 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
445 				      &self_speed, &self_width);
446 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
447 				      &peer_speed, &peer_width);
448 
449 	sndev->link_speed = min(self_speed, peer_speed);
450 	sndev->link_width = min(self_width, peer_width);
451 }
452 
/* Nonzero if the peer partition is configured as a crosslink. */
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;

	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}
459 
/*
 * In crosslink mode, configure the peer's doorbell/message block so
 * its incoming messages and doorbells are routed back to us.  Each
 * byte of msg_map encodes, for one incoming message register, the
 * target register index (low 2 bits) and target partition (bits
 * above).  No-op when crosslink is not enabled.
 */
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
	int i;
	u32 msg_map = 0;

	if (!crosslink_is_enabled(sndev))
		return;

	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
		int m = i | sndev->self_partition << 2;

		msg_map |= m << i * 8;
	}

	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
	/* unmask only the doorbell bits belonging to us, at the peer shift */
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);
}
478 
/*
 * Link-management message codes exchanged between the two sides
 * through message register LINK_MESSAGE.
 */
enum switchtec_msg {
	LINK_MESSAGE = 0,	/* index of the message register used */
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
	MSG_LINK_FORCE_DOWN = 4,
};
486 
487 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
488 
/*
 * Recompute the logical link state: the link is up only when both our
 * side wants it up (self_shared->link_sta) and the peer's shared
 * region is initialized (magic matches) with its own link_sta set in
 * the upper half of the magic qword.  On a state change, notify the
 * peer, raise an ntb link event, and (on link-up) reprogram the
 * crosslink doorbell/message routing.
 */
static void switchtec_ntb_link_status_update(struct switchtec_ntb *sndev)
{
	int link_sta;
	int old = sndev->link_is_up;

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/* magic (low 32) and peer link_sta (high 32) in one read */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}
517 
/*
 * Deferred link-check handler.  If a forced link-down was requested
 * (MSG_LINK_FORCE_DOWN), reinitialize the peer setup and report the
 * link down; otherwise just re-evaluate the link state.
 */
static void check_link_status_work(struct work_struct *work)
{
	struct switchtec_ntb *sndev;

	sndev = container_of(work, struct switchtec_ntb,
			     check_link_status_work);

	if (sndev->link_force_down) {
		/* consume the force-down request */
		sndev->link_force_down = false;
		switchtec_ntb_reinit_peer(sndev);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	switchtec_ntb_link_status_update(sndev);
}
540 
541 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
542 				      enum switchtec_msg msg)
543 {
544 	if (msg == MSG_LINK_FORCE_DOWN)
545 		sndev->link_force_down = true;
546 
547 	schedule_work(&sndev->check_link_status_work);
548 }
549 
/* Callback from the switchtec core when the hardware link changes. */
static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
	struct switchtec_ntb *sndev = stdev->sndev;

	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}
556 
557 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
558 				    enum ntb_speed *speed,
559 				    enum ntb_width *width)
560 {
561 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
562 
563 	if (speed)
564 		*speed = sndev->link_speed;
565 	if (width)
566 		*width = sndev->link_width;
567 
568 	return sndev->link_is_up;
569 }
570 
571 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
572 				     enum ntb_speed max_speed,
573 				     enum ntb_width max_width)
574 {
575 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
576 
577 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
578 
579 	sndev->self_shared->link_sta = 1;
580 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
581 
582 	switchtec_ntb_link_status_update(sndev);
583 
584 	return 0;
585 }
586 
587 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
588 {
589 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
590 
591 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
592 
593 	sndev->self_shared->link_sta = 0;
594 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
595 
596 	switchtec_ntb_link_status_update(sndev);
597 
598 	return 0;
599 }
600 
/* Mask of doorbell bits usable by the client. */
static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return sndev->db_valid_mask;
}
607 
/* All doorbells are delivered through a single interrupt vector. */
static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return 1;
}
612 
613 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
614 {
615 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
616 
617 	if (db_vector < 0 || db_vector > 1)
618 		return 0;
619 
620 	return sndev->db_valid_mask;
621 }
622 
623 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
624 {
625 	u64 ret;
626 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
627 
628 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
629 
630 	return ret & sndev->db_valid_mask;
631 }
632 
/* Acknowledge (clear) doorbell bits by writing them back to idb. */
static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);

	return 0;
}
641 
/*
 * Mask (disable interrupts for) the given doorbell bits.  The cached
 * db_mask is updated under db_mask_lock and written out inverted,
 * since the hardware idb_mask register is an enable mask.
 */
static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask |= db_bits << sndev->db_shift;
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
659 
/*
 * Unmask (re-enable interrupts for) the given doorbell bits; mirror
 * image of switchtec_ntb_db_set_mask().
 */
static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	unsigned long irqflags;
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	if (db_bits & ~sndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);

	sndev->db_mask &= ~(db_bits << sndev->db_shift);
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);

	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);

	return 0;
}
677 
/* Return the cached doorbell mask, normalized to client bit positions. */
static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
}
684 
685 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
686 				      phys_addr_t *db_addr,
687 				      resource_size_t *db_size,
688 				      u64 *db_data,
689 				      int db_bit)
690 {
691 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
692 	unsigned long offset;
693 
694 	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
695 		return -EINVAL;
696 
697 	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
698 		(unsigned long)sndev->stdev->mmio;
699 
700 	offset += sndev->db_shift / 8;
701 
702 	if (db_addr)
703 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
704 	if (db_size)
705 		*db_size = sizeof(u32);
706 	if (db_data)
707 		*db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
708 
709 	return 0;
710 }
711 
/* Ring the peer's doorbells by writing the shifted bits to its odb. */
static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);

	iowrite64(db_bits << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb);

	return 0;
}
721 
722 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
723 {
724 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
725 
726 	return ARRAY_SIZE(sndev->self_shared->spad);
727 }
728 
729 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
730 {
731 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
732 
733 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
734 		return 0;
735 
736 	if (!sndev->self_shared)
737 		return 0;
738 
739 	return sndev->self_shared->spad[idx];
740 }
741 
742 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
743 {
744 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
745 
746 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
747 		return -EINVAL;
748 
749 	if (!sndev->self_shared)
750 		return -EIO;
751 
752 	sndev->self_shared->spad[idx] = val;
753 
754 	return 0;
755 }
756 
757 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
758 					int sidx)
759 {
760 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
761 
762 	if (pidx != NTB_DEF_PEER_IDX)
763 		return -EINVAL;
764 
765 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
766 		return 0;
767 
768 	if (!sndev->peer_shared)
769 		return 0;
770 
771 	return ioread32(&sndev->peer_shared->spad[sidx]);
772 }
773 
774 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
775 					 int sidx, u32 val)
776 {
777 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
778 
779 	if (pidx != NTB_DEF_PEER_IDX)
780 		return -EINVAL;
781 
782 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
783 		return -EINVAL;
784 
785 	if (!sndev->peer_shared)
786 		return -EIO;
787 
788 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
789 
790 	return 0;
791 }
792 
793 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
794 					int sidx, phys_addr_t *spad_addr)
795 {
796 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
797 	unsigned long offset;
798 
799 	if (pidx != NTB_DEF_PEER_IDX)
800 		return -EINVAL;
801 
802 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
803 		(unsigned long)sndev->stdev->mmio;
804 
805 	if (spad_addr)
806 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
807 
808 	return 0;
809 }
810 
/* NTB API operations implemented by this driver. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
837 
838 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
839 {
840 	u64 tpart_vec;
841 	int self;
842 	u64 part_map;
843 	int bit;
844 
845 	sndev->ntb.pdev = sndev->stdev->pdev;
846 	sndev->ntb.topo = NTB_TOPO_SWITCH;
847 	sndev->ntb.ops = &switchtec_ntb_ops;
848 
849 	INIT_WORK(&sndev->check_link_status_work, check_link_status_work);
850 	sndev->link_force_down = false;
851 
852 	sndev->self_partition = sndev->stdev->partition;
853 
854 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
855 
856 	self = sndev->self_partition;
857 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
858 	tpart_vec <<= 32;
859 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
860 
861 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
862 	part_map &= ~(1 << sndev->self_partition);
863 
864 	if (!ffs(tpart_vec)) {
865 		if (sndev->stdev->partition_count != 2) {
866 			dev_err(&sndev->stdev->dev,
867 				"ntb target partition not defined\n");
868 			return -ENODEV;
869 		}
870 
871 		bit = ffs(part_map);
872 		if (!bit) {
873 			dev_err(&sndev->stdev->dev,
874 				"peer partition is not NT partition\n");
875 			return -ENODEV;
876 		}
877 
878 		sndev->peer_partition = bit - 1;
879 	} else {
880 		if (ffs(tpart_vec) != fls(tpart_vec)) {
881 			dev_err(&sndev->stdev->dev,
882 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
883 			return -ENODEV;
884 		}
885 
886 		sndev->peer_partition = ffs(tpart_vec) - 1;
887 		if (!(part_map & (1ULL << sndev->peer_partition))) {
888 			dev_err(&sndev->stdev->dev,
889 				"ntb target partition is not NT partition\n");
890 			return -ENODEV;
891 		}
892 	}
893 
894 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
895 		sndev->self_partition, sndev->stdev->partition_count);
896 
897 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
898 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
899 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
900 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
901 
902 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
903 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
904 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
905 	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
906 
907 	return 0;
908 }
909 
/*
 * Configure reserved LUT entry @lut_idx in @ctl to translate to
 * address @addr in @partition, enabling LUT windows on the BAR that
 * backs direct window 0.  The whole update is done under the
 * lock/configure partition-op bracket.
 *
 * Returns 0 on success or a negative errno from the partition op.
 */
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* keep the low control bits, set LUT enable/size/count fields */
	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;	/* log2 of each LUT window */
	ctl_val |= (sndev->nr_lut_mw - 1) << 14; /* number of LUTs - 1 */
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
948 
949 static int config_req_id_table(struct switchtec_ntb *sndev,
950 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
951 			       int *req_ids, int count)
952 {
953 	int i, rc = 0;
954 	u32 error;
955 	u32 proxy_id;
956 
957 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
958 		dev_err(&sndev->stdev->dev,
959 			"Not enough requester IDs available.\n");
960 		return -EFAULT;
961 	}
962 
963 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
964 				   NTB_CTRL_PART_OP_LOCK,
965 				   NTB_CTRL_PART_STATUS_LOCKED);
966 	if (rc)
967 		return rc;
968 
969 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
970 		  &mmio_ctrl->partition_ctrl);
971 
972 	for (i = 0; i < count; i++) {
973 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
974 			  &mmio_ctrl->req_id_table[i]);
975 
976 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
977 		dev_dbg(&sndev->stdev->dev,
978 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
979 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
980 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
981 			(proxy_id >> 1) & 0x7);
982 	}
983 
984 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
985 				   NTB_CTRL_PART_OP_CFG,
986 				   NTB_CTRL_PART_STATUS_NORMAL);
987 
988 	if (rc == -EIO) {
989 		error = ioread32(&mmio_ctrl->req_id_error);
990 		dev_err(&sndev->stdev->dev,
991 			"Error setting up the requester ID table: %08x\n",
992 			error);
993 	}
994 
995 	return 0;
996 }
997 
/*
 * In crosslink mode, program our own LUT entries and direct BAR
 * windows to translate into the virtual crosslink partition's BAR
 * space (@mw_addrs, enumerated by crosslink_enum_partition()).  LUT
 * slot @ntb_lut_idx is skipped — it is reserved for the peer's dbmsg
 * window.  All changes are bracketed by the lock/configure partition
 * ops.
 *
 * Returns 0 on success or a negative errno.
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	for (i = 0; i < sndev->nr_lut_mw; i++) {
		/* reserved for the crosslink dbmsg mapping */
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	/* can't use more direct windows than crosslink BARs available */
	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		/* window 0 shares its BAR with the LUT region */
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
			  &ctl->bar_entry[bar].win_size);
		iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
1063 
/*
 * Copy the proxy requester IDs already programmed in our own control
 * block into @mmio_ctrl (the peer's side of the crosslink), stopping
 * at the first disabled entry.
 */
static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
	struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
	int req_ids[16];
	int i;
	u32 proxy_id;

	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);

		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
			break;

		/* bits [8:1] hold the proxied requester ID */
		req_ids[i] = ((proxy_id >> 1) & 0xFF);
	}

	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
}
1082 
/*
 * In crosslink configuration there is a virtual partition in the
 * middle of the two switches. The BARs in this partition have to be
 * enumerated and assigned addresses.
 *
 * Each 64-bit BAR is assigned a fixed 64G-spaced address; BARs that
 * do not stick (read back differently) are skipped.  Fills
 * @bar_addrs and returns the number of usable BARs found.
 */
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;	/* 64G per BAR */
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* enable memory space and bus mastering on the virtual function */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		/* mask off the BAR type/flag bits */
		bar_addr &= ~0xf;

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i*2, bar_addr);

		/* skip BARs that did not accept the address */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	return bar_cnt;
}
1120 
/*
 * Set up the crosslink configuration, if enabled: enumerate the virtual
 * partition's BARs, build a reserved LUT window to reach the peer's
 * doorbell/message registers, configure the crosslink memory windows and
 * requester IDs, and map the window into our address space.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	/* Need one BAR for the reserved LUT window plus one per direct MW */
	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/* GAS address of the peer partition's dbmsg register block */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	/* LUT windows are LUT_SIZE aligned; remember the sub-window offset */
	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* Map LUT entry 1 (the second LUT_SIZE chunk of the BAR) */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	/* Peer dbmsg registers are now reachable through the mapped window */
	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	/* account for the LUT entry reserved for the crosslink window */
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}
1179 
1180 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1181 {
1182 	if (sndev->mmio_xlink_win)
1183 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1184 }
1185 
1186 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1187 {
1188 	int i;
1189 	int cnt = 0;
1190 
1191 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1192 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1193 
1194 		if (r & NTB_CTRL_BAR_VALID)
1195 			map[cnt++] = i;
1196 	}
1197 
1198 	return cnt;
1199 }
1200 
/*
 * Discover the available memory window resources on both sides of the
 * NTB: which BARs act as direct windows and how many LUT entries exist.
 */
static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
{
	/* Map our direct memory windows to their BAR numbers */
	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
				       sndev->mmio_self_ctrl);

	/* LUT window count is rounded down to a power of two */
	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
		sndev->nr_direct_mw, sndev->nr_lut_mw);

	/* Repeat the same discovery against the peer's control registers */
	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
					    sndev->mmio_peer_ctrl);

	sndev->peer_nr_lut_mw =
		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);

	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);

}
1223 
1224 /*
1225  * There are 64 doorbells in the switch hardware but this is
1226  * shared among all partitions. So we must split them in half
1227  * (32 for each partition). However, the message interrupts are
1228  * also shared with the top 4 doorbells so we just limit this to
1229  * 28 doorbells per partition.
1230  *
 * In crosslink mode, each side has its own dbmsg register so
1232  * they can each use all 60 of the available doorbells.
1233  */
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
	/* Low 60 bits usable (top 4 doorbells overlap message interrupts) */
	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;

	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
		/* Crosslink: separate dbmsg regs, each side gets all 60 */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = sndev->db_mask;
	} else if (sndev->self_partition < sndev->peer_partition) {
		/* Shared register: lower partition takes the low 28 bits */
		sndev->db_shift = 0;
		sndev->db_peer_shift = 32;
		sndev->db_valid_mask = 0x0FFFFFFF;
	} else {
		/* Shared register: higher partition takes bits 32..59 */
		sndev->db_shift = 32;
		sndev->db_peer_shift = 0;
		sndev->db_valid_mask = 0x0FFFFFFF;
	}

	/*
	 * Program the inbound mask (inverted: bits in db_mask enabled) and
	 * the outbound mask toward the peer (written non-inverted —
	 * presumably the two registers have opposite polarity).
	 */
	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
		  &sndev->mmio_peer_dbmsg->odb_mask);

	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
1259 
1260 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1261 {
1262 	int i;
1263 	u32 msg_map = 0;
1264 
1265 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1266 		int m = i | sndev->peer_partition << 2;
1267 
1268 		msg_map |= m << i * 8;
1269 	}
1270 
1271 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1272 
1273 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1274 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1275 			  &sndev->mmio_self_dbmsg->imsg[i]);
1276 }
1277 
1278 static int
1279 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1280 {
1281 	int req_ids[2];
1282 
1283 	/*
1284 	 * Root Complex Requester ID (which is 0:00.0)
1285 	 */
1286 	req_ids[0] = 0;
1287 
1288 	/*
1289 	 * Host Bridge Requester ID (as read from the mmap address)
1290 	 */
1291 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1292 
1293 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1294 				   ARRAY_SIZE(req_ids));
1295 }
1296 
1297 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1298 {
1299 	int i;
1300 
1301 	memset(sndev->self_shared, 0, LUT_SIZE);
1302 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1303 	sndev->self_shared->partition_id = sndev->stdev->partition;
1304 
1305 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1306 		int bar = sndev->direct_mw_to_bar[i];
1307 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1308 
1309 		if (i == 0)
1310 			sz = min_t(resource_size_t, sz,
1311 				   LUT_SIZE * sndev->nr_lut_mw);
1312 
1313 		sndev->self_shared->mw_sizes[i] = sz;
1314 	}
1315 
1316 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1317 		int idx = sndev->nr_direct_mw + i;
1318 
1319 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1320 	}
1321 }
1322 
/*
 * Allocate and publish the shared memory window: DMA memory holds our
 * half of the shared data, LUT entry 0 on the peer points at it, and
 * we iomap the corresponding window on our side to read the peer's half.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	/* LUT entry 0 is reserved for the shared memory window */
	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
						LUT_SIZE,
						&sndev->self_shared_dma,
						GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	/* Point the peer's reserved LUT entry at our shared buffer */
	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	/* Map the first LUT window of our first BAR to see the peer's data */
	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}
1362 
/* Undo switchtec_ntb_init_shared_mw(): unmap, free and unreserve the LUT */
static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
{
	if (sndev->peer_shared)
		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);

	if (sndev->self_shared)
		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
				  sndev->self_shared,
				  sndev->self_shared_dma);
	/* release the LUT entry reserved in switchtec_ntb_init_shared_mw() */
	sndev->nr_rsvd_luts--;
}
1374 
/*
 * Doorbell interrupt handler: all doorbells arrive on one vector, so
 * simply notify the NTB core (vector 0) and let it read the bits.
 */
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
{
	struct switchtec_ntb *sndev = dev;

	dev_dbg(&sndev->stdev->dev, "doorbell\n");

	ntb_db_event(&sndev->ntb, 0);

	return IRQ_HANDLED;
}
1385 
1386 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1387 {
1388 	int i;
1389 	struct switchtec_ntb *sndev = dev;
1390 
1391 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1392 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1393 
1394 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1395 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1396 				i, (u32)msg);
1397 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1398 
1399 			if (i == LINK_MESSAGE)
1400 				switchtec_ntb_check_link(sndev, msg);
1401 		}
1402 	}
1403 
1404 	return IRQ_HANDLED;
1405 }
1406 
1407 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1408 {
1409 	int i;
1410 	int rc;
1411 	int doorbell_irq = 0;
1412 	int message_irq = 0;
1413 	int event_irq;
1414 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1415 
1416 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1417 
1418 	while (doorbell_irq == event_irq)
1419 		doorbell_irq++;
1420 	while (message_irq == doorbell_irq ||
1421 	       message_irq == event_irq)
1422 		message_irq++;
1423 
1424 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1425 		event_irq, doorbell_irq, message_irq);
1426 
1427 	for (i = 0; i < idb_vecs - 4; i++)
1428 		iowrite8(doorbell_irq,
1429 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1430 
1431 	for (; i < idb_vecs; i++)
1432 		iowrite8(message_irq,
1433 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1434 
1435 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1436 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1437 
1438 	rc = request_irq(sndev->doorbell_irq,
1439 			 switchtec_ntb_doorbell_isr, 0,
1440 			 "switchtec_ntb_doorbell", sndev);
1441 	if (rc)
1442 		return rc;
1443 
1444 	rc = request_irq(sndev->message_irq,
1445 			 switchtec_ntb_message_isr, 0,
1446 			 "switchtec_ntb_message", sndev);
1447 	if (rc) {
1448 		free_irq(sndev->doorbell_irq, sndev);
1449 		return rc;
1450 	}
1451 
1452 	return 0;
1453 }
1454 
/* Free the IRQs requested by switchtec_ntb_init_db_msg_irq() */
static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
{
	free_irq(sndev->doorbell_irq, sndev);
	free_irq(sndev->message_irq, sndev);
}
1460 
1461 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
1462 {
1463 	int rc;
1464 
1465 	if (crosslink_is_enabled(sndev))
1466 		return 0;
1467 
1468 	dev_info(&sndev->stdev->dev, "reinitialize shared memory window\n");
1469 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1470 				 sndev->self_partition,
1471 				 sndev->self_shared_dma);
1472 	return rc;
1473 }
1474 
/*
 * Class-interface add callback: called for each switchtec device.
 * Allocates the NTB wrapper, performs the full hardware initialization
 * sequence and registers the NTB device.  Returns 0 on success,
 * -ENODEV for non-bridge devices, or a negative errno with everything
 * torn down on failure.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only attach to devices exposing the bridge class code */
	if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	/* Publish the device and hook link notifications */
	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}
1545 
/*
 * Class-interface remove callback: tears everything down in the
 * reverse order of switchtec_ntb_add().  A no-op for devices that were
 * never bound (sndev == NULL).
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	if (!sndev)
		return;

	/* detach from the stdev before freeing anything */
	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}
1564 
/* Hooks NTB setup/teardown into switchtec device add/remove events */
static struct class_interface switchtec_interface  = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
1569 
static int __init switchtec_ntb_init(void)
{
	/*
	 * Register on the switchtec class; the core then invokes
	 * switchtec_ntb_add() for each matching device.
	 */
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
1576 
static void __exit switchtec_ntb_exit(void)
{
	/* switchtec_ntb_remove() is invoked for each bound device */
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);
1582