/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *      http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_ring_t *);
static int ixgbe_alloc_dma_buffer(ixgbe_t *, dma_buffer_t *, size_t);
static void ixgbe_free_dma_buffer(dma_buffer_t *);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_ring_t *);

#ifdef __sparc
#define	IXGBE_DMA_ALIGNMENT	0x0000000000002000ull
#else
#define	IXGBE_DMA_ALIGNMENT	0x0000000000001000ull
#endif

/*
 * DMA attributes for tx/rx descriptors.
 */
static ddi_dma_attr_t ixgbe_desc_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA attributes for tx/rx buffers.
 */
static ddi_dma_attr_t ixgbe_buf_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	IXGBE_DMA_ALIGNMENT,		/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	1,				/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};
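/*
 * Note that both attribute structures above limit the scatter/gather
 * list length to 1, so each descriptor ring and each pre-allocated
 * packet buffer must bind to a single physically contiguous DMA
 * cookie; the allocation routines below ASSERT that exactly one
 * cookie comes back from ddi_dma_addr_bind_handle().  The transmit
 * attributes that follow allow up to MAX_COOKIE cookies instead,
 * since mblks handed down from the stack are bound dynamically and
 * need not be contiguous.
 */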
/*
 * DMA attributes for transmit.
 */
static ddi_dma_attr_t ixgbe_tx_dma_attr = {
	DMA_ATTR_V0,			/* version number */
	0x0000000000000000ull,		/* low address */
	0xFFFFFFFFFFFFFFFFull,		/* high address */
	0x00000000FFFFFFFFull,		/* dma counter max */
	1,				/* alignment */
	0x00000FFF,			/* burst sizes */
	0x00000001,			/* minimum transfer size */
	0x00000000FFFFFFFFull,		/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,		/* maximum segment size */
	MAX_COOKIE,			/* scatter/gather list length */
	0x00000001,			/* granularity */
	DDI_DMA_FLAGERR			/* DMA flags */
};

/*
 * DMA access attributes for descriptors.
 */
static ddi_device_acc_attr_t ixgbe_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for buffers.
 */
static ddi_device_acc_attr_t ixgbe_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * ixgbe_alloc_dma - Allocate DMA resources for all rx/tx rings.
 */
int
ixgbe_alloc_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		/*
		 * Allocate receive descriptor ring and control block lists
		 */
		rx_ring = &ixgbe->rx_rings[i];

		if (ixgbe_alloc_rbd_ring(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_rcb_lists(rx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/*
		 * Allocate transmit descriptor ring and control block lists
		 */
		tx_ring = &ixgbe->tx_rings[i];

		if (ixgbe_alloc_tbd_ring(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;

		if (ixgbe_alloc_tcb_lists(tx_ring) != IXGBE_SUCCESS)
			goto alloc_dma_failure;
	}

	return (IXGBE_SUCCESS);

alloc_dma_failure:
	ixgbe_free_dma(ixgbe);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	/*
	 * Free DMA resources of rx rings
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_free_rbd_ring(rx_ring);
		ixgbe_free_rcb_lists(rx_ring);
	}

	/*
	 * Free DMA resources of tx rings
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_free_tbd_ring(tx_ring);
		ixgbe_free_tcb_lists(tx_ring);
	}
}
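/*
 * Each ixgbe_free_* routine below checks its handles and pointers for
 * NULL before releasing them, so they are safe to call on a partially
 * constructed ring.  This is what lets the alloc_dma_failure path
 * above unwind with a single call to ixgbe_free_dma(), no matter how
 * far allocation had progressed.
 */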
/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 */
static void
ixgbe_free_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_area.dma_handle);
	}
	if (tx_ring->tbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
		tx_ring->tbd_area.acc_handle = NULL;
	}
	if (tx_ring->tbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
		tx_ring->tbd_area.dma_handle = NULL;
	}
	tx_ring->tbd_area.address = NULL;
	tx_ring->tbd_area.dma_address = NULL;
	tx_ring->tbd_area.size = 0;

	tx_ring->tbd_ring = NULL;
}
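/*
 * The descriptor rings and DMA buffers in this file all follow the
 * standard three-step DDI allocation pattern -- ddi_dma_alloc_handle(),
 * ddi_dma_mem_alloc(), ddi_dma_addr_bind_handle() -- and are torn down
 * in the reverse order: unbind, mem_free, free_handle.
 */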
/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_ring->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_ring->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_ring->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_ring->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_ring->rbd_area.address,
	    &len, &rx_ring->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_ring->rbd_area.acc_handle = NULL;
		rx_ring->rbd_area.address = NULL;
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_ring->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_ring->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_ring->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_ring->rbd_area.dma_address = NULL;
		if (rx_ring->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
			rx_ring->rbd_area.acc_handle = NULL;
			rx_ring->rbd_area.address = NULL;
		}
		if (rx_ring->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
			rx_ring->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_ring->rbd_area.dma_address = cookie.dmac_laddress;
	rx_ring->rbd_area.size = len;

	rx_ring->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_ring->rbd_area.address;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 */
static void
ixgbe_free_rbd_ring(ixgbe_rx_ring_t *rx_ring)
{
	if (rx_ring->rbd_area.dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_ring->rbd_area.dma_handle);
	}
	if (rx_ring->rbd_area.acc_handle != NULL) {
		ddi_dma_mem_free(&rx_ring->rbd_area.acc_handle);
		rx_ring->rbd_area.acc_handle = NULL;
	}
	if (rx_ring->rbd_area.dma_handle != NULL) {
		ddi_dma_free_handle(&rx_ring->rbd_area.dma_handle);
		rx_ring->rbd_area.dma_handle = NULL;
	}
	rx_ring->rbd_area.address = NULL;
	rx_ring->rbd_area.dma_address = NULL;
	rx_ring->rbd_area.size = 0;

	rx_ring->rbd_ring = NULL;
}
/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 */
static void
ixgbe_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
		buf->dma_address = NULL;
	} else {
		return;
	}

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}
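/*
 * Unlike the descriptor rings, which are mapped DDI_DMA_CONSISTENT
 * because both the driver and the hardware read and write them
 * continuously, the packet buffers above are mapped DDI_DMA_STREAMING:
 * packet data moves in one direction per transfer, which lets the
 * platform optimize the mapping for throughput rather than for
 * synchronization.
 */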
/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma resources for the tx control blocks of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 */
static void
ixgbe_free_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	tx_control_block_t *tcb;

	tcb = tx_ring->tcb_area;
	if (tcb == NULL)
		return;

	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		/* Free the tx dma handle for dynamic binding */
		if (tcb->tx_dma_handle != NULL) {
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, then we don't
			 * have to check the remaining.
			 */
			break;
		}

		ixgbe_free_dma_buffer(&tcb->tx_buf);
	}

	if (tx_ring->tcb_area != NULL) {
		kmem_free(tx_ring->tcb_area,
		    sizeof (tx_control_block_t) * tx_ring->free_list_size);
		tx_ring->tcb_area = NULL;
	}

	if (tx_ring->work_list != NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;
	}

	if (tx_ring->free_list != NULL) {
		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;
	}
}
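/*
 * The receive side allocates ring_size + free_list_size control
 * blocks: one per descriptor slot on the work list, plus spares on
 * the free list.  A filled buffer can be loaned upstream as an mblk,
 * with a spare from the free list taking its place in the ring; the
 * loaned rcb comes back through ixgbe_rx_recycle() once the upper
 * layer frees the mblk.
 */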
/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the work list.
	 */
	rx_ring->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->ring_size, KM_NOSLEEP);

	if (rx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_ring->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_ring->free_list_size, KM_NOSLEEP);

	if (rx_ring->free_list == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	rx_ring->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_ring->rcb_area == NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;

		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate dma memory for the rx control blocks
	 */
	rcb = rx_ring->rcb_area;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_ring->ring_size) {
			/* Attach the rx control block to the work list */
			rx_ring->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_ring->free_list[i - rx_ring->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->state = RCB_FREE;
		rcb->rx_ring = (ixgbe_rx_ring_t *)rx_ring;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_ring);

	return (IXGBE_FAILURE);
}
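/*
 * In the loop above, IPHDR_ALIGN_ROOM bytes are reserved at the head
 * of each receive buffer (both addresses are bumped and the usable
 * size shrunk) so that the IP header, which follows the 14-byte
 * Ethernet header in a received frame, ends up suitably aligned.
 * Also note that a failed desballoc() is not treated as fatal here:
 * rcb->mp is simply left NULL, and the receive path must cope with
 * that case when the buffer is used.
 */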
/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 */
static void
ixgbe_free_rcb_lists(ixgbe_rx_ring_t *rx_ring)
{
	int i;
	rx_control_block_t *rcb;
	uint32_t rcb_count;

	rcb = rx_ring->rcb_area;
	if (rcb == NULL)
		return;

	rcb_count = rx_ring->ring_size + rx_ring->free_list_size;
	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);
		ASSERT(rcb->state == RCB_FREE);

		if (rcb->mp != NULL) {
			freemsg(rcb->mp);
			rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&rcb->rx_buf);
	}

	if (rx_ring->rcb_area != NULL) {
		kmem_free(rx_ring->rcb_area,
		    sizeof (rx_control_block_t) * rcb_count);
		rx_ring->rcb_area = NULL;
	}

	if (rx_ring->work_list != NULL) {
		kmem_free(rx_ring->work_list,
		    sizeof (rx_control_block_t *) * rx_ring->ring_size);
		rx_ring->work_list = NULL;
	}

	if (rx_ring->free_list != NULL) {
		kmem_free(rx_ring->free_list,
		    sizeof (rx_control_block_t *) * rx_ring->free_list_size);
		rx_ring->free_list = NULL;
	}
}

/*
 * ixgbe_set_fma_flags - Set the attributes for FMA support.
 */
void
ixgbe_set_fma_flags(int acc_flag, int dma_flag)
{
	if (acc_flag) {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_desc_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (dma_flag) {
		ixgbe_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		ixgbe_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		ixgbe_tx_dma_attr.dma_attr_flags = 0;
		ixgbe_buf_dma_attr.dma_attr_flags = 0;
		ixgbe_desc_dma_attr.dma_attr_flags = 0;
	}
}