/*	$OpenBSD: if_le_ioasic.c,v 1.18 2017/10/13 08:58:42 mpi Exp $	*/
/*	$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

#ifdef __alpha__
#include <machine/rpb.h>
#endif /* __alpha__ */

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

int	le_ioasic_match(struct device *, void *, void *);
void	le_ioasic_attach(struct device *, struct device *, void *);

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

#ifdef __alpha__
#ifdef DEC_3000_500
int	le_ioasic_ifmedia_change(struct lance_softc *);
void	le_ioasic_ifmedia_status(struct lance_softc *, struct ifmediareq *);
void	le_ioasic_nocarrier(struct lance_softc *);
#endif
#endif /* __alpha__ */

int
le_ioasic_match(struct device *parent, void *match, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
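
/*
 * Allocate and map the 128KB LANCE DMA buffer, point the IOASIC DMA
 * engine at it, install the gap2/gap16 buffer access routines, and
 * finish with the common LANCE attachment and interrupt hookup.
 */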
void
le_ioasic_attach(struct device *parent, struct device *self, void *aux)
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

#ifdef __alpha__
#ifdef DEC_3000_500
	/*
	 * On non-300 DEC 3000 models, both AUI and UTP are available.
	 */
	if (cputype == ST_DEC_3000_500) {
		static const uint64_t media[] = {
			IFM_ETHER | IFM_10_T,
			IFM_ETHER | IFM_10_5,
			IFM_ETHER | IFM_AUTO
		};
		le->sc_mediachange = le_ioasic_ifmedia_change;
		le->sc_mediastatus = le_ioasic_ifmedia_status;
		le->sc_supmedia = media;
		le->sc_nsupmedia = nitems(media);
		le->sc_defaultmedia = IFM_ETHER | IFM_AUTO;
		le->sc_nocarrier = le_ioasic_nocarrier;
	}
#endif
#endif /* __alpha__ */

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, IPL_NET,
	    am7990_intr, sc, self->dv_xname);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
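 *
 * Illustration of how the host sees the start of a gap2 buffer
 * ('d' = data byte, '.' = pad byte):
 *
 *	d d . .  d d . .  d d . .  d d . .  ...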
 */

void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */

void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int16_t t0;
				u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t *)from;
				t1 = *(u_int32_t *)(from + 2);
				t2 = *(u_int32_t *)(from + 6);
				t3 = *(u_int32_t *)(from + 10);
				t4 = *(u_int16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				u_int32_t *src = (u_int32_t *)from;
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1;
				dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

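	/*
	 * In gap16 space each 16 bytes of packet data occupy the first
	 * half of a 32-byte chunk, so the buffer offset is the packet
	 * offset doubled and truncated to a 32-byte boundary; what is
	 * left in boff is the offset into that chunk's 16-byte data half.
	 */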
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff so source of subsequent copies is 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *)(to + 0) = (u_short)t0;
				*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;

		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t *dst = (u_int32_t *)to;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1];
				t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1;
				dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		default:
			/* XXX Does odd-byte-aligned case ever happen? */
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

#ifdef __alpha__
#ifdef DEC_3000_500
int
le_ioasic_ifmedia_change(struct lance_softc *lsc)
{
	struct le_ioasic_softc *sc = (struct le_ioasic_softc *)lsc;
	struct ifmedia *ifm = &sc->sc_am7990.lsc.sc_ifmedia;
	bus_space_tag_t ioasic_bst =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bst;
	bus_space_handle_t ioasic_bsh =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bsh;
	u_int32_t ossr, ssr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	ossr = ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_10_5:
		ssr &= ~IOASIC_CSR_ETHERNET_UTP;
		break;
	case IFM_10_T:
		ssr |= IOASIC_CSR_ETHERNET_UTP;
		break;
	case IFM_AUTO:
		break;
	default:
		return EINVAL;
	}

	if (ossr != ssr)
		bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	return 0;
}

void
le_ioasic_ifmedia_status(struct lance_softc *lsc, struct ifmediareq *req)
{
	struct le_ioasic_softc *sc = (struct le_ioasic_softc *)lsc;
	bus_space_tag_t ioasic_bst =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bst;
	bus_space_handle_t ioasic_bsh =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bsh;
	u_int32_t ssr;

	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);

	if (ssr & IOASIC_CSR_ETHERNET_UTP)
		req->ifm_active = IFM_ETHER | IFM_10_T;
	else
		req->ifm_active = IFM_ETHER | IFM_10_5;
}
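
/*
 * Carrier was lost on the currently selected port.  If the configured
 * media allows it, flip the IOASIC CSR over to the other port
 * (UTP <-> AUI).
 */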
void
le_ioasic_nocarrier(struct lance_softc *lsc)
{
	struct le_ioasic_softc *sc = (struct le_ioasic_softc *)lsc;
	bus_space_tag_t ioasic_bst =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bst;
	bus_space_handle_t ioasic_bsh =
	    ((struct ioasic_softc *)sc->sc_am7990.lsc.sc_dev.dv_parent)->sc_bsh;
	u_int32_t ossr, ssr;

	ossr = ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);

	if (ssr & IOASIC_CSR_ETHERNET_UTP) {
		switch (IFM_SUBTYPE(lsc->sc_ifmedia.ifm_media)) {
		case IFM_10_5:
		case IFM_AUTO:
			printf("%s: lost carrier on UTP port"
			    ", switching to AUI port\n", lsc->sc_dev.dv_xname);
			ssr ^= IOASIC_CSR_ETHERNET_UTP;
			break;
		}
	} else {
		switch (IFM_SUBTYPE(lsc->sc_ifmedia.ifm_media)) {
		case IFM_10_T:
		case IFM_AUTO:
			printf("%s: lost carrier on AUI port"
			    ", switching to UTP port\n", lsc->sc_dev.dv_xname);
			ssr ^= IOASIC_CSR_ETHERNET_UTP;
			break;
		}
	}

	if (ossr != ssr)
		bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);
}
#endif
#endif /* __alpha__ */