xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_amd.c (revision a3557ef0)
1 /*-
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright (C) 2019 Advanced Micro Devices, Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * BSD LICENSE
14  *
15  * Copyright (c) 2019 Advanced Micro Devices, Inc.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of AMD corporation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * Contact Information :
42  * Rajesh Kumar <rajesh1.kumar@amd.com>
43  */
44 
45 /*
46  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
48  *
49  * This module contains a driver for NTB hardware in AMD CPUs
50  *
51  * Much of the code in this module is shared with Linux. Any patches may
52  * be picked up and redistributed in Linux with a dual GPL/BSD license.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/systm.h>
61 #include <sys/bus.h>
62 #include <sys/lock.h>
63 #include <sys/malloc.h>
64 #include <sys/module.h>
65 #include <sys/mutex.h>
66 #include <sys/rman.h>
67 #include <sys/sbuf.h>
68 #include <sys/sysctl.h>
69 
70 #include <vm/vm.h>
71 #include <vm/pmap.h>
72 
73 #include <machine/bus.h>
74 
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 
78 #include "ntb_hw_amd.h"
79 #include "dev/ntb/ntb.h"
80 
81 MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");
82 
/*
 * Hardware parameters for each supported chip, selected by PCI ID at
 * probe time.  NB: amd_ntb_devs below refers to these entries by array
 * index, so the order here matters.
 */
static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = {

	/* [0] AMD device where memory window 0 sits behind a 32-bit BAR. */
	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID1,
	  .mw_count = 3,
	  .bar_start_idx = 1,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = QUIRK_MW0_32BIT,
	  .desc = "AMD Non-Transparent Bridge"},

	/* [1] AMD device with two windows, all behind 64-bit BARs. */
	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID2,
	  .mw_count = 2,
	  .bar_start_idx = 2,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = 0,
	  .desc = "AMD Non-Transparent Bridge"},

	/* [2] Hygon device; same layout and quirks as entry [0]. */
	{ .vendor_id = NTB_HW_HYGON_VENDOR_ID,
	  .device_id = NTB_HW_HYGON_DEVICE_ID1,
	  .mw_count = 3,
	  .bar_start_idx = 1,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = QUIRK_MW0_32BIT,
	  .desc = "Hygon Non-Transparent Bridge"},
};
115 
116 static const struct pci_device_table amd_ntb_devs[] = {
117 	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1),
118 	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0],
119 	  PCI_DESCR("AMD Non-Transparent Bridge") },
120 	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2),
121 	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1],
122 	  PCI_DESCR("AMD Non-Transparent Bridge") },
123 	{ PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1),
124 	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0],
125 	  PCI_DESCR("Hygon Non-Transparent Bridge") }
126 };
127 
/* Runtime-tunable verbosity for amd_ntb_printf(); 0 is quietest. */
static unsigned g_amd_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");

/*
 * Print only when 'lvl' is at or below the configured debug level.
 * NB: expects a local variable named 'ntb' to be in scope at the call
 * site.
 */
#define amd_ntb_printf(lvl, ...) do {				\
        if (lvl <= g_amd_ntb_hw_debug_level)			\
                device_printf(ntb->device, __VA_ARGS__);	\
} while (0)
136 
#ifdef __i386__
/*
 * i386 bus space lacks native 64-bit accessors; synthesize them from two
 * 32-bit accesses (low dword at 'offset', high dword at 'offset + 4').
 * NB: the combined access is not atomic.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif
156 
157 /*
158  * AMD NTB INTERFACE ROUTINES
159  */
160 static int
161 amd_ntb_port_number(device_t dev)
162 {
163 	struct amd_ntb_softc *ntb = device_get_softc(dev);
164 
165 	amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);
166 
167 	switch (ntb->conn_type) {
168 	case NTB_CONN_PRI:
169 		return (NTB_PORT_PRI_USD);
170 	case NTB_CONN_SEC:
171 		return (NTB_PORT_SEC_DSD);
172 	default:
173 		break;
174 	}
175 
176 	return (-EINVAL);
177 }
178 
179 static int
180 amd_ntb_peer_port_count(device_t dev)
181 {
182 	struct amd_ntb_softc *ntb = device_get_softc(dev);
183 
184 	amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
185 	return (NTB_DEF_PEER_CNT);
186 }
187 
188 static int
189 amd_ntb_peer_port_number(device_t dev, int pidx)
190 {
191 	struct amd_ntb_softc *ntb = device_get_softc(dev);
192 
193 	amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
194 	    __func__, pidx, ntb->conn_type);
195 
196 	if (pidx != NTB_DEF_PEER_IDX)
197 		return (-EINVAL);
198 
199 	switch (ntb->conn_type) {
200 	case NTB_CONN_PRI:
201 		return (NTB_PORT_SEC_DSD);
202 	case NTB_CONN_SEC:
203 		return (NTB_PORT_PRI_USD);
204 	default:
205 		break;
206 	}
207 
208 	return (-EINVAL);
209 }
210 
211 static int
212 amd_ntb_peer_port_idx(device_t dev, int port)
213 {
214 	struct amd_ntb_softc *ntb = device_get_softc(dev);
215 	int peer_port;
216 
217 	peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);
218 
219 	amd_ntb_printf(1, "%s: port %d peer_port %d\n",
220 	    __func__, port, peer_port);
221 
222 	if (peer_port == -EINVAL || port != peer_port)
223 		return (-EINVAL);
224 
225 	return (0);
226 }
227 
228 /*
229  * AMD NTB INTERFACE - LINK ROUTINES
230  */
231 static inline int
232 amd_link_is_up(struct amd_ntb_softc *ntb)
233 {
234 
235 	amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
236 	    __func__, ntb->peer_sta, ntb->cntl_sta);
237 
238 	if (!ntb->peer_sta)
239 		return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));
240 
241 	return (0);
242 }
243 
244 static inline enum ntb_speed
245 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
246 {
247 
248 	if (!amd_link_is_up(ntb))
249 		return (NTB_SPEED_NONE);
250 
251 	return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
252 }
253 
254 static inline enum ntb_width
255 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
256 {
257 
258 	if (!amd_link_is_up(ntb))
259 		return (NTB_WIDTH_NONE);
260 
261 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
262 }
263 
264 static bool
265 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
266 {
267 	struct amd_ntb_softc *ntb = device_get_softc(dev);
268 
269 	if (speed != NULL)
270 		*speed = amd_ntb_link_sta_speed(ntb);
271 	if (width != NULL)
272 		*width = amd_ntb_link_sta_width(ntb);
273 
274 	return (amd_link_is_up(ntb));
275 }
276 
/*
 * Enable the NTB link: advertise this side as ready, unmask event
 * interrupts and, on the primary side only, set the link control bits.
 * max_speed/max_width are accepted for the interface but not used here.
 */
static int
amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
    enum ntb_width max_width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Set AMD_SIDE_READY in SIDEINFO so the peer sees us. */
	amd_init_side_info(ntb);

	/* Enable event interrupt */
	ntb->int_mask &= ~AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side drives the link control bits. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);

	/* Turn on the PMM/SMM control bits to bring the link up. */
	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
305 
/*
 * Disable the NTB link: withdraw our ready bit, mask event interrupts
 * and, on the primary side only, clear the link control bits.
 */
static int
amd_ntb_link_disable(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Clear AMD_SIDE_READY in SIDEINFO. */
	amd_deinit_side_info(ntb);

	/* Disable event interrupt */
	ntb->int_mask |= AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side drives the link control bits. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);

	/* Clear the PMM/SMM control bits to take the link down. */
	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
333 
334 /*
335  * AMD NTB memory window routines
336  */
337 static uint8_t
338 amd_ntb_mw_count(device_t dev)
339 {
340 	struct amd_ntb_softc *ntb = device_get_softc(dev);
341 
342 	return (ntb->hw_info->mw_count);
343 }
344 
345 static int
346 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
347     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
348     bus_addr_t *plimit)
349 {
350 	struct amd_ntb_softc *ntb = device_get_softc(dev);
351 	struct amd_ntb_pci_bar_info *bar_info;
352 
353 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
354 		return (EINVAL);
355 
356 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
357 
358 	if (base != NULL)
359 		*base = bar_info->pbase;
360 
361 	if (vbase != NULL)
362 		*vbase = bar_info->vbase;
363 
364 	if (align != NULL)
365 		*align = bar_info->size;
366 
367 	if (size != NULL)
368 		*size = bar_info->size;
369 
370 	if (align_size != NULL)
371 		*align_size = 1;
372 
373 	if (plimit != NULL) {
374 		/*
375 		 * For Device ID 0x145B (which has 3 memory windows),
376 		 * memory window 0 use a 32-bit bar. The remaining
377 		 * cases all use 64-bit bar.
378 		 */
379 		if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT))
380 			*plimit = BUS_SPACE_MAXADDR_32BIT;
381 		else
382 			*plimit = BUS_SPACE_MAXADDR;
383 	}
384 
385 	return (0);
386 }
387 
/*
 * Program memory window 'mw_idx' to translate to peer address 'addr'
 * for 'size' bytes.  Writes the XLAT register, then the limit register
 * whose width depends on the BAR (see QUIRK_MW0_32BIT), reading each
 * back for the debug log.
 */
static int
amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];

	/* Make sure the range fits in the usable mw size. */
	if (size > bar_info->size) {
		amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n",
		    __func__, (uintmax_t)size, (uintmax_t)bar_info->size);
		return (EINVAL);
	}

	amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n",
	    __func__, mw_idx, (uintmax_t)bar_info->size,
	    (uintmax_t)size, (void *)bar_info->pci_bus_handle);

	/*
	 * AMD NTB XLAT and Limit registers needs to be written only after
	 * link enable.
	 *
	 * Set and verify setting the translation address register.
	 */
	amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
	amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n",
	    __func__, mw_idx, bar_info->xlat_off,
	    amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);

	/*
	 * Set and verify setting the limit register.
	 *
	 * For Device ID 0x145B (which has 3 memory windows),
	 * memory window 0 use a 32-bit bar. The remaining
	 * cases all use 64-bit bar.
	 *
	 * NOTE(review): the limit is written via amd_ntb_reg_write but
	 * read back via amd_ntb_peer_reg_read — confirm whether the
	 * self/peer register-space mismatch is intentional.
	 */
	if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) {
		amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(4, bar_info->limit_off),
		    (uint32_t)size);
	} else {
		amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(8, bar_info->limit_off),
		    (uintmax_t)size);
	}

	return (0);
}
444 
445 static int
446 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
447 {
448 	struct amd_ntb_softc *ntb = device_get_softc(dev);
449 
450 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
451 
452 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
453 		return (EINVAL);
454 
455 	return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
456 }
457 
458 static int
459 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
460 {
461 	struct amd_ntb_softc *ntb = device_get_softc(dev);
462 	struct amd_ntb_pci_bar_info *bar_info;
463 	int rc;
464 
465 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
466 		return (EINVAL);
467 
468 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
469 	if (mode == bar_info->map_mode)
470 		return (0);
471 
472 	rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode);
473 	if (rc == 0)
474 		bar_info->map_mode = mode;
475 
476 	return (rc);
477 }
478 
479 static int
480 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
481 {
482 	struct amd_ntb_softc *ntb = device_get_softc(dev);
483 	struct amd_ntb_pci_bar_info *bar_info;
484 
485 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
486 
487 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
488 		return (EINVAL);
489 
490 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
491 	*mode = bar_info->map_mode;
492 
493 	return (0);
494 }
495 
496 /*
497  * AMD NTB doorbell routines
498  */
499 static int
500 amd_ntb_db_vector_count(device_t dev)
501 {
502 	struct amd_ntb_softc *ntb = device_get_softc(dev);
503 
504 	amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__,
505 	    ntb->hw_info->db_count);
506 
507 	return (ntb->hw_info->db_count);
508 }
509 
510 static uint64_t
511 amd_ntb_db_valid_mask(device_t dev)
512 {
513 	struct amd_ntb_softc *ntb = device_get_softc(dev);
514 
515 	amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
516 	    __func__, ntb->db_valid_mask);
517 
518 	return (ntb->db_valid_mask);
519 }
520 
521 static uint64_t
522 amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
523 {
524 	struct amd_ntb_softc *ntb = device_get_softc(dev);
525 
526 	amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
527 	    __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask);
528 
529 	if (vector < 0 || vector >= ntb->hw_info->db_count)
530 		return (0);
531 
532 	return (ntb->db_valid_mask & (1 << vector));
533 }
534 
535 static uint64_t
536 amd_ntb_db_read(device_t dev)
537 {
538 	struct amd_ntb_softc *ntb = device_get_softc(dev);
539 	uint64_t dbstat_off;
540 
541 	dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);
542 
543 	amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off);
544 
545 	return (dbstat_off);
546 }
547 
/*
 * Acknowledge doorbells by writing their bits to the 16-bit doorbell
 * status register (only the low 16 bits of db_bits are meaningful).
 */
static void
amd_ntb_db_clear(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
	amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
}
556 
/*
 * Mask (disable) the given doorbell interrupt bits.  The cached mask
 * and the hardware register are updated together under the DB lock.
 */
static void
amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask |= db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
570 
/*
 * Unmask (enable) the given doorbell interrupt bits.  The cached mask
 * and the hardware register are updated together under the DB lock.
 */
static void
amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask &= ~db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
584 
/*
 * Ring the peer's doorbell(s) by writing the bits to the 16-bit
 * doorbell request register.
 */
static void
amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
	amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
}
593 
594 /*
595  * AMD NTB scratchpad routines
596  */
597 static uint8_t
598 amd_ntb_spad_count(device_t dev)
599 {
600 	struct amd_ntb_softc *ntb = device_get_softc(dev);
601 
602 	amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__,
603 	    ntb->spad_count);
604 
605 	return (ntb->spad_count);
606 }
607 
608 static int
609 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
610 {
611 	struct amd_ntb_softc *ntb = device_get_softc(dev);
612 	uint32_t offset;
613 
614 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
615 
616 	if (idx < 0 || idx >= ntb->spad_count)
617 		return (EINVAL);
618 
619 	offset = ntb->self_spad + (idx << 2);
620 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
621 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
622 
623 	return (0);
624 }
625 
626 static int
627 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
628 {
629 	struct amd_ntb_softc *ntb = device_get_softc(dev);
630 	uint32_t offset;
631 
632 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
633 
634 	if (idx < 0 || idx >= ntb->spad_count)
635 		return (EINVAL);
636 
637 	offset = ntb->self_spad + (idx << 2);
638 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
639 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
640 
641 	return (0);
642 }
643 
644 static void
645 amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
646 {
647 	uint8_t i;
648 
649 	for (i = 0; i < ntb->spad_count; i++)
650 		amd_ntb_spad_write(ntb->device, i, 0);
651 }
652 
653 static int
654 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
655 {
656 	struct amd_ntb_softc *ntb = device_get_softc(dev);
657 	uint32_t offset;
658 
659 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
660 
661 	if (idx < 0 || idx >= ntb->spad_count)
662 		return (EINVAL);
663 
664 	offset = ntb->peer_spad + (idx << 2);
665 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
666 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
667 
668 	return (0);
669 }
670 
671 static int
672 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
673 {
674 	struct amd_ntb_softc *ntb = device_get_softc(dev);
675 	uint32_t offset;
676 
677 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
678 
679 	if (idx < 0 || idx >= ntb->spad_count)
680 		return (EINVAL);
681 
682 	offset = ntb->peer_spad + (idx << 2);
683 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
684 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
685 
686 	return (0);
687 }
688 
689 
690 /*
691  * AMD NTB INIT
692  */
693 static int
694 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
695 {
696 	struct amd_ntb_softc* ntb = arg1;
697 	struct sbuf *sb;
698 	int rc = 0;
699 
700 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
701 	if (sb == NULL)
702 		return (sb->s_error);
703 
704 	sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
705 	sbuf_printf(sb, "AMD NTB side: %s\n",
706 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
707 	sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta);
708 
709 	if (!amd_link_is_up(ntb))
710 		sbuf_printf(sb, "AMD Link Status: Down\n");
711 	else {
712 		sbuf_printf(sb, "AMD Link Status: Up\n");
713 		sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
714 		    NTB_LNK_STA_SPEED(ntb->lnk_sta));
715 		sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
716 		    NTB_LNK_STA_WIDTH(ntb->lnk_sta));
717 	}
718 
719 	sbuf_printf(sb, "AMD Memory window count: %d\n",
720 	    ntb->hw_info->mw_count);
721 	sbuf_printf(sb, "AMD Spad count: %d\n",
722 	    ntb->spad_count);
723 	sbuf_printf(sb, "AMD Doorbell count: %d\n",
724 	    ntb->hw_info->db_count);
725 	sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
726 	    ntb->msix_vec_count);
727 	sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
728 	    ntb->db_valid_mask);
729 	sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
730 	    amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
731 	sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
732 	    amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
733 	sbuf_printf(sb, "AMD NTB Incoming XLAT: \n");
734 	sbuf_printf(sb, "AMD XLAT1: 0x%jx\n",
735 	    amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
736 	sbuf_printf(sb, "AMD XLAT23: 0x%jx\n",
737 	    amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
738 	sbuf_printf(sb, "AMD XLAT45: 0x%jx\n",
739 	    amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
740 	sbuf_printf(sb, "AMD LMT1: 0x%x\n",
741 	    amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
742 	sbuf_printf(sb, "AMD LMT23: 0x%jx\n",
743 	    amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
744 	sbuf_printf(sb, "AMD LMT45: 0x%jx\n",
745 	    amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));
746 
747 	rc = sbuf_finish(sb);
748 	sbuf_delete(sb);
749 	return (rc);
750 }
751 
752 static void
753 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
754 {
755 	struct sysctl_oid_list *globals;
756 	struct sysctl_ctx_list *ctx;
757 
758 	ctx = device_get_sysctl_ctx(ntb->device);
759 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
760 
761 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
762 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
763 	    amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
764 }
765 
/*
 * Polls the HW link status register(s); returns true if something has changed.
 */
static bool
amd_ntb_poll_link(struct amd_ntb_softc *ntb)
{
	uint32_t fullreg, reg, stat;

	/* Read the peer's SIDEINFO and keep only the link-active bit. */
	fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
	reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;

	/* Unchanged since the last poll: nothing to report. */
	if (reg == ntb->cntl_sta)
		return (false);

	amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
	    __func__, fullreg, ntb->cntl_sta);

	ntb->cntl_sta = reg;

	/* Refresh the cached PCIe link status from config space. */
	stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);

	amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
	    __func__, stat, ntb->lnk_sta);

	ntb->lnk_sta = stat;

	return (true);
}
794 
795 static void
796 amd_link_hb(void *arg)
797 {
798 	struct amd_ntb_softc *ntb = arg;
799 
800 	if (amd_ntb_poll_link(ntb))
801 		ntb_link_event(ntb->device);
802 
803 	if (!amd_link_is_up(ntb)) {
804 		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
805 		    amd_link_hb, ntb);
806 	} else {
807 		callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
808 		    amd_link_hb, ntb);
809 	}
810 }
811 
812 static void
813 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
814 {
815 	if (vec < ntb->hw_info->db_count)
816 		ntb_db_event(ntb->device, vec);
817 	else
818 		amd_ntb_printf(0, "Invalid vector %d\n", vec);
819 }
820 
/* MSI-X handler: each vector carries its own number in the arg. */
static void
amd_ntb_vec_isr(void *arg)
{
	struct amd_ntb_vec *nvec = arg;

	amd_ntb_interrupt(nvec->ntb, nvec->num);
}
828 
/* Single-vector (MSI/INTx) handler; 'arg' is the softc. */
static void
amd_ntb_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	amd_ntb_interrupt(arg, 0);
}
835 
/*
 * Set AMD_SIDE_READY in our SIDEINFO register to tell the peer we are
 * up; a no-op if it is already set.
 */
static void
amd_init_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
	}
	/* Read back; presumably flushes the posted write — TODO confirm. */
	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
}
848 
/*
 * Clear AMD_SIDE_READY in our SIDEINFO register so the peer sees us as
 * down; a no-op if it is already clear.
 */
static void
amd_deinit_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
		/* Read back; presumably flushes the posted write — TODO confirm. */
		amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	}
}
861 
/*
 * Allocate and wire up 'num_vectors' IRQ resources.  With MSI or INTx
 * there is a single vector served by amd_ntb_irq_isr(); with MSI-X each
 * vector gets amd_ntb_vec_isr() and its own msix_vec slot.  On failure
 * the resources allocated so far remain tracked via allocated_interrupts
 * for amd_ntb_deinit_isr() to release.
 */
static int
amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
    bool intx)
{
	uint16_t i;
	int flags = 0, rc = 0;

	flags |= RF_ACTIVE;
	/* Legacy INTx lines may be shared with other devices. */
	if (intx)
		flags |= RF_SHAREABLE;

	for (i = 0; i < num_vectors; i++) {

		/* RID should be 0 for intx */
		if (intx)
			ntb->int_info[i].rid = i;
		else
			ntb->int_info[i].rid = i + 1;	/* MSI/MSI-X RIDs start at 1 */

		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
		if (ntb->int_info[i].res == NULL) {
			amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
			return (ENOMEM);
		}

		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;

		if (msi || intx) {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
			    ntb, &ntb->int_info[i].tag);
		} else {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
			    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		}

		if (rc != 0) {
			amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
			return (ENXIO);
		}
	}

	return (0);
}
909 
910 static int
911 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
912 {
913 	uint8_t i;
914 
915 	ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
916 	    M_ZERO | M_WAITOK);
917 
918 	for (i = 0; i < max_vectors; i++) {
919 		ntb->msix_vec[i].num = i;
920 		ntb->msix_vec[i].ntb = ntb;
921 	}
922 
923 	return (0);
924 }
925 
926 static void
927 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
928 {
929 	if (ntb->msix_vec_count) {
930 		pci_release_msi(ntb->device);
931 		ntb->msix_vec_count = 0;
932 	}
933 
934 	if (ntb->msix_vec != NULL) {
935 		free(ntb->msix_vec, M_AMD_NTB);
936 		ntb->msix_vec = NULL;
937 	}
938 }
939 
/*
 * Interrupt setup with a three-stage fallback: try one MSI-X vector per
 * doorbell; if the device or OS cannot provide that many, fall back to
 * a single MSI message; if even that fails, use legacy INTx.  The
 * effective doorbell count is clamped to the vectors obtained.
 */
static int
amd_ntb_init_isr(struct amd_ntb_softc *ntb)
{
	uint32_t supported_vectors, num_vectors;
	bool msi = false, intx = false;
	int rc = 0;

	/* Start with every doorbell masked. */
	ntb->db_mask = ntb->db_valid_mask;

	rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count);
	if (rc != 0) {
		amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
		return (ENOMEM);
	}

	/*
	 * Check the number of MSI-X message supported by the device.
	 * Minimum necessary MSI-X message count should be equal to db_count.
	 */
	supported_vectors = pci_msix_count(ntb->device);
	num_vectors = MIN(supported_vectors, ntb->hw_info->db_count);
	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
		    supported_vectors, ntb->hw_info->db_count);
		msi = true;
		goto err_msix_enable;
	}

	/* Allocate the necessary number of MSI-x messages */
	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
		msi = true;
		goto err_msix_enable;
	}

	/* pci_alloc_msix may grant fewer vectors than requested. */
	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
		msi = true;
		/*
		 * Else set ntb->hw_info->db_count = ntb->msix_vec_count =
		 * num_vectors, msi=false and dont release msi.
		 */
	}

err_msix_enable:

	/* MSI-X failed or was short: drop it and try a single MSI. */
	if (msi) {
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
		pci_release_msi(ntb->device);
		num_vectors = 1;
		rc = pci_alloc_msi(ntb->device, &num_vectors);
		if (rc != 0) {
			amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
			msi = false;
			intx = true;	/* last resort: legacy INTx */
		}
	}

	ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors;

	if (intx) {
		num_vectors = 1;
		ntb->hw_info->db_count = 1;
		ntb->msix_vec_count = 0;
	}

	amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
	    __func__, ntb->hw_info->db_count, ntb->msix_vec_count, (int)msi, (int)intx);

	rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
	if (rc != 0) {
		amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
		amd_ntb_free_msix_vec(ntb);
	}

	return (rc);
}
1019 
/*
 * Tear down interrupt handling: mask all doorbells, then detach and
 * release every IRQ resource recorded in allocated_interrupts, and
 * finally drop the MSI/MSI-X state.
 */
static void
amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
{
	struct amd_ntb_int_info *current_int;
	int i;

	/* Mask all doorbell interrupts */
	ntb->db_mask = ntb->db_valid_mask;
	amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		/* Teardown before release: the handler may still be wired. */
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	amd_ntb_free_msix_vec(ntb);
}
1043 
1044 static enum amd_ntb_conn_type
1045 amd_ntb_get_topo(struct amd_ntb_softc *ntb)
1046 {
1047 	uint32_t info;
1048 
1049 	info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
1050 
1051 	if (info & AMD_SIDE_MASK)
1052 		return (NTB_CONN_SEC);
1053 
1054 	return (NTB_CONN_PRI);
1055 }
1056 
/*
 * Per-topology device initialization: build the doorbell mask, split
 * the scratchpad space between the two sides, start the link heartbeat
 * callout, and mask event interrupts until link enable.
 */
static int
amd_ntb_init_dev(struct amd_ntb_softc *ntb)
{
	ntb->db_valid_mask	 = (1ull << ntb->hw_info->db_count) - 1;
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		/* Each side gets half the scratchpads. */
		ntb->spad_count >>= 1;

		/*
		 * Primary uses the low half (peer at 0x20); secondary is
		 * the mirror image.
		 */
		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		/* Start polling the link state (see amd_link_hb). */
		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	/* Event interrupts stay masked until amd_ntb_link_enable(). */
	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}
1093 
1094 static int
1095 amd_ntb_init(struct amd_ntb_softc *ntb)
1096 {
1097 	int rc = 0;
1098 
1099 	ntb->conn_type = amd_ntb_get_topo(ntb);
1100 	amd_ntb_printf(0, "AMD NTB Side: %s\n",
1101 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
1102 
1103 	rc = amd_ntb_init_dev(ntb);
1104 	if (rc != 0)
1105 		return (rc);
1106 
1107 	rc = amd_ntb_init_isr(ntb);
1108 	if (rc != 0)
1109 		return (rc);
1110 
1111 	return (0);
1112 }
1113 
1114 static void
1115 print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
1116     const char *kind)
1117 {
1118 	amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
1119 	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
1120 	    (char *)bar->vbase + bar->size - 1, (void *)bar->pbase,
1121 	    (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
1122 }
1123 
1124 static void
1125 save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
1126 {
1127 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
1128 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
1129 	bar->pbase = rman_get_start(bar->pci_resource);
1130 	bar->size = rman_get_size(bar->pci_resource);
1131 	bar->vbase = rman_get_virtual(bar->pci_resource);
1132 	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
1133 }
1134 
1135 static int
1136 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
1137 {
1138 	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
1139 	    &bar->pci_resource_id, RF_ACTIVE);
1140 	if (bar->pci_resource == NULL)
1141 		return (ENXIO);
1142 
1143 	save_bar_parameters(bar);
1144 	print_map_success(ntb, bar, "mmr");
1145 
1146 	return (0);
1147 }
1148 
1149 static int
1150 amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
1151 {
1152 	int rc = 0;
1153 
1154 	/* NTB Config/Control registers - BAR 0 */
1155 	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
1156 	rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
1157 	if (rc != 0)
1158 		goto out;
1159 
1160 	/* Memory Window 0 BAR - BAR 1 */
1161 	ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
1162 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
1163 	if (rc != 0)
1164 		goto out;
1165 	ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
1166 	ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;
1167 
1168 	/* Memory Window 1 BAR - BAR 2&3 */
1169 	ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
1170 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
1171 	if (rc != 0)
1172 		goto out;
1173 	ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
1174 	ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;
1175 
1176 	/* Memory Window 2 BAR - BAR 4&5 */
1177 	ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
1178 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
1179 	if (rc != 0)
1180 		goto out;
1181 	ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
1182 	ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;
1183 
1184 out:
1185 	if (rc != 0)
1186 		amd_ntb_printf(0, "unable to allocate pci resource\n");
1187 
1188 	return (rc);
1189 }
1190 
1191 static void
1192 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
1193 {
1194 	struct amd_ntb_pci_bar_info *bar_info;
1195 	int i;
1196 
1197 	for (i = 0; i < NTB_MAX_BARS; i++) {
1198 		bar_info = &ntb->bar_info[i];
1199 		if (bar_info->pci_resource != NULL)
1200 			bus_release_resource(ntb->device, SYS_RES_MEMORY,
1201 			    bar_info->pci_resource_id, bar_info->pci_resource);
1202 	}
1203 }
1204 
1205 static int
1206 amd_ntb_probe(device_t device)
1207 {
1208 	struct amd_ntb_softc *ntb = device_get_softc(device);
1209 	const struct pci_device_table *tbl;
1210 
1211 	tbl = PCI_MATCH(device, amd_ntb_devs);
1212 	if (tbl == NULL)
1213 		return (ENXIO);
1214 
1215 	ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data;
1216 	ntb->spad_count = ntb->hw_info->spad_count;
1217 	device_set_desc(device, tbl->descr);
1218 
1219 	return (BUS_PROBE_GENERIC);
1220 }
1221 
/*
 * device_attach(9) method: map the NTB BARs, initialize the hardware and
 * interrupts, set up sysctl nodes, and register the NTB child device so
 * transport drivers can attach.  On any failure the full detach routine
 * is invoked to unwind.
 */
static int
amd_ntb_attach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	int error;

	ntb->device = device;

	/* Enable PCI bus mastering for "device" */
	pci_enable_busmaster(ntb->device);

	error = amd_ntb_map_pci_bars(ntb);
	if (error)
		goto out;

	error = amd_ntb_init(ntb);
	if (error)
		goto out;

	/* Presumably publishes this side's info to the peer — see
	 * amd_init_side_info(); confirm against its definition. */
	amd_init_side_info(ntb);

	amd_ntb_spad_clear(ntb);

	amd_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	/*
	 * NOTE(review): on an early failure (e.g. amd_ntb_map_pci_bars()),
	 * amd_ntb_detach() still runs callout_drain() and mtx_destroy() on
	 * objects that amd_ntb_init_dev() never initialized — verify this
	 * is safe on a zeroed softc.
	 */
	if (error)
		amd_ntb_detach(device);

	return (error);
}
1256 
/*
 * device_detach(9) method: tear down in roughly the reverse order of
 * attach — unregister children, stop the heartbeat callout, tear down
 * interrupts, and release the PCI resources.  Note this is also invoked
 * from amd_ntb_attach()'s error path, so the softc may be only partially
 * initialized when it runs.  Always returns 0.
 */
static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	/* Wait for any in-flight amd_link_hb() heartbeat to finish. */
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}
1272 
/*
 * Method table wiring this driver into newbus and the generic NTB
 * framework: standard probe/attach/detach, bus glue for the NTB child
 * devices, and the full set of ntb_if.m operations (link, memory
 * windows, doorbells, scratchpads).
 */
static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location_str, ntb_child_location_str),
	DEVMETHOD(bus_print_child,	ntb_print_child),
	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, 	amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};

/* Driver class, module registration, and PnP match info. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL);
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
PCI_PNP_INFO(amd_ntb_devs);
1320