1 /* 2 * QEMU Cadence GEM emulation 3 * 4 * Copyright (c) 2011 Xilinx, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include <zlib.h> /* For crc32 */ 27 28 #include "hw/irq.h" 29 #include "hw/net/cadence_gem.h" 30 #include "hw/qdev-properties.h" 31 #include "hw/registerfields.h" 32 #include "migration/vmstate.h" 33 #include "qapi/error.h" 34 #include "qemu/log.h" 35 #include "qemu/module.h" 36 #include "sysemu/dma.h" 37 #include "net/checksum.h" 38 #include "net/eth.h" 39 40 #define CADENCE_GEM_ERR_DEBUG 0 41 #define DB_PRINT(...) 
do {\ 42 if (CADENCE_GEM_ERR_DEBUG) { \ 43 qemu_log(": %s: ", __func__); \ 44 qemu_log(__VA_ARGS__); \ 45 } \ 46 } while (0) 47 48 REG32(NWCTRL, 0x0) /* Network Control reg */ 49 FIELD(NWCTRL, LOOPBACK , 0, 1) 50 FIELD(NWCTRL, LOOPBACK_LOCAL , 1, 1) 51 FIELD(NWCTRL, ENABLE_RECEIVE, 2, 1) 52 FIELD(NWCTRL, ENABLE_TRANSMIT, 3, 1) 53 FIELD(NWCTRL, MAN_PORT_EN , 4, 1) 54 FIELD(NWCTRL, CLEAR_ALL_STATS_REGS , 5, 1) 55 FIELD(NWCTRL, INC_ALL_STATS_REGS, 6, 1) 56 FIELD(NWCTRL, STATS_WRITE_EN, 7, 1) 57 FIELD(NWCTRL, BACK_PRESSURE, 8, 1) 58 FIELD(NWCTRL, TRANSMIT_START , 9, 1) 59 FIELD(NWCTRL, TRANSMIT_HALT, 10, 1) 60 FIELD(NWCTRL, TX_PAUSE_FRAME_RE, 11, 1) 61 FIELD(NWCTRL, TX_PAUSE_FRAME_ZE, 12, 1) 62 FIELD(NWCTRL, STATS_TAKE_SNAP, 13, 1) 63 FIELD(NWCTRL, STATS_READ_SNAP, 14, 1) 64 FIELD(NWCTRL, STORE_RX_TS, 15, 1) 65 FIELD(NWCTRL, PFC_ENABLE, 16, 1) 66 FIELD(NWCTRL, PFC_PRIO_BASED, 17, 1) 67 FIELD(NWCTRL, FLUSH_RX_PKT_PCLK , 18, 1) 68 FIELD(NWCTRL, TX_LPI_EN, 19, 1) 69 FIELD(NWCTRL, PTP_UNICAST_ENA, 20, 1) 70 FIELD(NWCTRL, ALT_SGMII_MODE, 21, 1) 71 FIELD(NWCTRL, STORE_UDP_OFFSET, 22, 1) 72 FIELD(NWCTRL, EXT_TSU_PORT_EN, 23, 1) 73 FIELD(NWCTRL, ONE_STEP_SYNC_MO, 24, 1) 74 FIELD(NWCTRL, PFC_CTRL , 25, 1) 75 FIELD(NWCTRL, EXT_RXQ_SEL_EN , 26, 1) 76 FIELD(NWCTRL, OSS_CORRECTION_FIELD, 27, 1) 77 FIELD(NWCTRL, SEL_MII_ON_RGMII, 28, 1) 78 FIELD(NWCTRL, TWO_PT_FIVE_GIG, 29, 1) 79 FIELD(NWCTRL, IFG_EATS_QAV_CREDIT, 30, 1) 80 81 REG32(NWCFG, 0x4) /* Network Config reg */ 82 FIELD(NWCFG, SPEED, 0, 1) 83 FIELD(NWCFG, FULL_DUPLEX, 1, 1) 84 FIELD(NWCFG, DISCARD_NON_VLAN_FRAMES, 2, 1) 85 FIELD(NWCFG, JUMBO_FRAMES, 3, 1) 86 FIELD(NWCFG, PROMISC, 4, 1) 87 FIELD(NWCFG, NO_BROADCAST, 5, 1) 88 FIELD(NWCFG, MULTICAST_HASH_EN, 6, 1) 89 FIELD(NWCFG, UNICAST_HASH_EN, 7, 1) 90 FIELD(NWCFG, RECV_1536_BYTE_FRAMES, 8, 1) 91 FIELD(NWCFG, EXTERNAL_ADDR_MATCH_EN, 9, 1) 92 FIELD(NWCFG, GIGABIT_MODE_ENABLE, 10, 1) 93 FIELD(NWCFG, PCS_SELECT, 11, 1) 94 FIELD(NWCFG, RETRY_TEST, 12, 1) 95 FIELD(NWCFG, 
PAUSE_ENABLE, 13, 1) 96 FIELD(NWCFG, RECV_BUF_OFFSET, 14, 2) 97 FIELD(NWCFG, LEN_ERR_DISCARD, 16, 1) 98 FIELD(NWCFG, FCS_REMOVE, 17, 1) 99 FIELD(NWCFG, MDC_CLOCK_DIV, 18, 3) 100 FIELD(NWCFG, DATA_BUS_WIDTH, 21, 2) 101 FIELD(NWCFG, DISABLE_COPY_PAUSE_FRAMES, 23, 1) 102 FIELD(NWCFG, RECV_CSUM_OFFLOAD_EN, 24, 1) 103 FIELD(NWCFG, EN_HALF_DUPLEX_RX, 25, 1) 104 FIELD(NWCFG, IGNORE_RX_FCS, 26, 1) 105 FIELD(NWCFG, SGMII_MODE_ENABLE, 27, 1) 106 FIELD(NWCFG, IPG_STRETCH_ENABLE, 28, 1) 107 FIELD(NWCFG, NSP_ACCEPT, 29, 1) 108 FIELD(NWCFG, IGNORE_IPG_RX_ER, 30, 1) 109 FIELD(NWCFG, UNI_DIRECTION_ENABLE, 31, 1) 110 111 REG32(NWSTATUS, 0x8) /* Network Status reg */ 112 REG32(USERIO, 0xc) /* User IO reg */ 113 114 REG32(DMACFG, 0x10) /* DMA Control reg */ 115 FIELD(DMACFG, SEND_BCAST_TO_ALL_QS, 31, 1) 116 FIELD(DMACFG, DMA_ADDR_BUS_WIDTH, 30, 1) 117 FIELD(DMACFG, TX_BD_EXT_MODE_EN , 29, 1) 118 FIELD(DMACFG, RX_BD_EXT_MODE_EN , 28, 1) 119 FIELD(DMACFG, FORCE_MAX_AMBA_BURST_TX, 26, 1) 120 FIELD(DMACFG, FORCE_MAX_AMBA_BURST_RX, 25, 1) 121 FIELD(DMACFG, FORCE_DISCARD_ON_ERR, 24, 1) 122 FIELD(DMACFG, RX_BUF_SIZE, 16, 8) 123 FIELD(DMACFG, CRC_ERROR_REPORT, 13, 1) 124 FIELD(DMACFG, INF_LAST_DBUF_SIZE_EN, 12, 1) 125 FIELD(DMACFG, TX_PBUF_CSUM_OFFLOAD, 11, 1) 126 FIELD(DMACFG, TX_PBUF_SIZE, 10, 1) 127 FIELD(DMACFG, RX_PBUF_SIZE, 8, 2) 128 FIELD(DMACFG, ENDIAN_SWAP_PACKET, 7, 1) 129 FIELD(DMACFG, ENDIAN_SWAP_MGNT, 6, 1) 130 FIELD(DMACFG, HDR_DATA_SPLIT_EN, 5, 1) 131 FIELD(DMACFG, AMBA_BURST_LEN , 0, 5) 132 #define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */ 133 134 REG32(TXSTATUS, 0x14) /* TX Status reg */ 135 FIELD(TXSTATUS, TX_USED_BIT_READ_MIDFRAME, 12, 1) 136 FIELD(TXSTATUS, TX_FRAME_TOO_LARGE, 11, 1) 137 FIELD(TXSTATUS, TX_DMA_LOCKUP, 10, 1) 138 FIELD(TXSTATUS, TX_MAC_LOCKUP, 9, 1) 139 FIELD(TXSTATUS, RESP_NOT_OK, 8, 1) 140 FIELD(TXSTATUS, LATE_COLLISION, 7, 1) 141 FIELD(TXSTATUS, TRANSMIT_UNDER_RUN, 6, 1) 142 FIELD(TXSTATUS, TRANSMIT_COMPLETE, 5, 1) 143 FIELD(TXSTATUS, 
AMBA_ERROR, 4, 1) 144 FIELD(TXSTATUS, TRANSMIT_GO, 3, 1) 145 FIELD(TXSTATUS, RETRY_LIMIT, 2, 1) 146 FIELD(TXSTATUS, COLLISION, 1, 1) 147 FIELD(TXSTATUS, USED_BIT_READ, 0, 1) 148 149 REG32(RXQBASE, 0x18) /* RX Q Base address reg */ 150 REG32(TXQBASE, 0x1c) /* TX Q Base address reg */ 151 REG32(RXSTATUS, 0x20) /* RX Status reg */ 152 FIELD(RXSTATUS, RX_DMA_LOCKUP, 5, 1) 153 FIELD(RXSTATUS, RX_MAC_LOCKUP, 4, 1) 154 FIELD(RXSTATUS, RESP_NOT_OK, 3, 1) 155 FIELD(RXSTATUS, RECEIVE_OVERRUN, 2, 1) 156 FIELD(RXSTATUS, FRAME_RECEIVED, 1, 1) 157 FIELD(RXSTATUS, BUF_NOT_AVAILABLE, 0, 1) 158 159 REG32(ISR, 0x24) /* Interrupt Status reg */ 160 REG32(IER, 0x28) /* Interrupt Enable reg */ 161 REG32(IDR, 0x2c) /* Interrupt Disable reg */ 162 REG32(IMR, 0x30) /* Interrupt Mask reg */ 163 REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */ 164 REG32(RXPAUSE, 0x38) /* RX Pause Time reg */ 165 REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */ 166 REG32(TXPARTIALSF, 0x40) /* TX Partial Store and Forward */ 167 REG32(RXPARTIALSF, 0x44) /* RX Partial Store and Forward */ 168 REG32(JUMBO_MAX_LEN, 0x48) /* Max Jumbo Frame Size */ 169 REG32(HASHLO, 0x80) /* Hash Low address reg */ 170 REG32(HASHHI, 0x84) /* Hash High address reg */ 171 REG32(SPADDR1LO, 0x88) /* Specific addr 1 low reg */ 172 REG32(SPADDR1HI, 0x8c) /* Specific addr 1 high reg */ 173 REG32(SPADDR2LO, 0x90) /* Specific addr 2 low reg */ 174 REG32(SPADDR2HI, 0x94) /* Specific addr 2 high reg */ 175 REG32(SPADDR3LO, 0x98) /* Specific addr 3 low reg */ 176 REG32(SPADDR3HI, 0x9c) /* Specific addr 3 high reg */ 177 REG32(SPADDR4LO, 0xa0) /* Specific addr 4 low reg */ 178 REG32(SPADDR4HI, 0xa4) /* Specific addr 4 high reg */ 179 REG32(TIDMATCH1, 0xa8) /* Type ID1 Match reg */ 180 REG32(TIDMATCH2, 0xac) /* Type ID2 Match reg */ 181 REG32(TIDMATCH3, 0xb0) /* Type ID3 Match reg */ 182 REG32(TIDMATCH4, 0xb4) /* Type ID4 Match reg */ 183 REG32(WOLAN, 0xb8) /* Wake on LAN reg */ 184 REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */ 185 REG32(SVLAN, 
0xc0) /* Stacked VLAN reg */ 186 REG32(MODID, 0xfc) /* Module ID reg */ 187 REG32(OCTTXLO, 0x100) /* Octects transmitted Low reg */ 188 REG32(OCTTXHI, 0x104) /* Octects transmitted High reg */ 189 REG32(TXCNT, 0x108) /* Error-free Frames transmitted */ 190 REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */ 191 REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */ 192 REG32(TXPAUSECNT, 0x114) /* Pause Frames Transmitted */ 193 REG32(TX64CNT, 0x118) /* Error-free 64 TX */ 194 REG32(TX65CNT, 0x11c) /* Error-free 65-127 TX */ 195 REG32(TX128CNT, 0x120) /* Error-free 128-255 TX */ 196 REG32(TX256CNT, 0x124) /* Error-free 256-511 */ 197 REG32(TX512CNT, 0x128) /* Error-free 512-1023 TX */ 198 REG32(TX1024CNT, 0x12c) /* Error-free 1024-1518 TX */ 199 REG32(TX1519CNT, 0x130) /* Error-free larger than 1519 TX */ 200 REG32(TXURUNCNT, 0x134) /* TX under run error counter */ 201 REG32(SINGLECOLLCNT, 0x138) /* Single Collision Frames */ 202 REG32(MULTCOLLCNT, 0x13c) /* Multiple Collision Frames */ 203 REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */ 204 REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */ 205 REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */ 206 REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */ 207 REG32(OCTRXLO, 0x150) /* Octects Received register Low */ 208 REG32(OCTRXHI, 0x154) /* Octects Received register High */ 209 REG32(RXCNT, 0x158) /* Error-free Frames Received */ 210 REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */ 211 REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */ 212 REG32(RXPAUSECNT, 0x164) /* Pause Frames Received Counter */ 213 REG32(RX64CNT, 0x168) /* Error-free 64 byte Frames RX */ 214 REG32(RX65CNT, 0x16c) /* Error-free 65-127B Frames RX */ 215 REG32(RX128CNT, 0x170) /* Error-free 128-255B Frames RX */ 216 REG32(RX256CNT, 0x174) /* Error-free 256-512B Frames RX */ 217 REG32(RX512CNT, 0x178) /* Error-free 512-1023B Frames RX */ 218 REG32(RX1024CNT, 0x17c) /* Error-free 1024-1518B 
Frames RX */ 219 REG32(RX1519CNT, 0x180) /* Error-free 1519-max Frames RX */ 220 REG32(RXUNDERCNT, 0x184) /* Undersize Frames Received */ 221 REG32(RXOVERCNT, 0x188) /* Oversize Frames Received */ 222 REG32(RXJABCNT, 0x18c) /* Jabbers Received Counter */ 223 REG32(RXFCSCNT, 0x190) /* Frame Check seq. Error Counter */ 224 REG32(RXLENERRCNT, 0x194) /* Length Field Error Counter */ 225 REG32(RXSYMERRCNT, 0x198) /* Symbol Error Counter */ 226 REG32(RXALIGNERRCNT, 0x19c) /* Alignment Error Counter */ 227 REG32(RXRSCERRCNT, 0x1a0) /* Receive Resource Error Counter */ 228 REG32(RXORUNCNT, 0x1a4) /* Receive Overrun Counter */ 229 REG32(RXIPCSERRCNT, 0x1a8) /* IP header Checksum Err Counter */ 230 REG32(RXTCPCCNT, 0x1ac) /* TCP Checksum Error Counter */ 231 REG32(RXUDPCCNT, 0x1b0) /* UDP Checksum Error Counter */ 232 233 REG32(1588S, 0x1d0) /* 1588 Timer Seconds */ 234 REG32(1588NS, 0x1d4) /* 1588 Timer Nanoseconds */ 235 REG32(1588ADJ, 0x1d8) /* 1588 Timer Adjust */ 236 REG32(1588INC, 0x1dc) /* 1588 Timer Increment */ 237 REG32(PTPETXS, 0x1e0) /* PTP Event Frame Transmitted (s) */ 238 REG32(PTPETXNS, 0x1e4) /* PTP Event Frame Transmitted (ns) */ 239 REG32(PTPERXS, 0x1e8) /* PTP Event Frame Received (s) */ 240 REG32(PTPERXNS, 0x1ec) /* PTP Event Frame Received (ns) */ 241 REG32(PTPPTXS, 0x1e0) /* PTP Peer Frame Transmitted (s) */ 242 REG32(PTPPTXNS, 0x1e4) /* PTP Peer Frame Transmitted (ns) */ 243 REG32(PTPPRXS, 0x1e8) /* PTP Peer Frame Received (s) */ 244 REG32(PTPPRXNS, 0x1ec) /* PTP Peer Frame Received (ns) */ 245 246 /* Design Configuration Registers */ 247 REG32(DESCONF, 0x280) 248 REG32(DESCONF2, 0x284) 249 REG32(DESCONF3, 0x288) 250 REG32(DESCONF4, 0x28c) 251 REG32(DESCONF5, 0x290) 252 REG32(DESCONF6, 0x294) 253 #define GEM_DESCONF6_64B_MASK (1U << 23) 254 REG32(DESCONF7, 0x298) 255 256 REG32(INT_Q1_STATUS, 0x400) 257 REG32(INT_Q1_MASK, 0x640) 258 259 REG32(TRANSMIT_Q1_PTR, 0x440) 260 REG32(TRANSMIT_Q7_PTR, 0x458) 261 262 REG32(RECEIVE_Q1_PTR, 0x480) 263 
REG32(RECEIVE_Q7_PTR, 0x498) 264 265 REG32(TBQPH, 0x4c8) 266 REG32(RBQPH, 0x4d4) 267 268 REG32(INT_Q1_ENABLE, 0x600) 269 REG32(INT_Q7_ENABLE, 0x618) 270 271 REG32(INT_Q1_DISABLE, 0x620) 272 REG32(INT_Q7_DISABLE, 0x638) 273 274 REG32(SCREENING_TYPE1_REG0, 0x500) 275 FIELD(SCREENING_TYPE1_REG0, QUEUE_NUM, 0, 4) 276 FIELD(SCREENING_TYPE1_REG0, DSTC_MATCH, 4, 8) 277 FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH, 12, 16) 278 FIELD(SCREENING_TYPE1_REG0, DSTC_ENABLE, 28, 1) 279 FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN, 29, 1) 280 FIELD(SCREENING_TYPE1_REG0, DROP_ON_MATCH, 30, 1) 281 282 REG32(SCREENING_TYPE2_REG0, 0x540) 283 FIELD(SCREENING_TYPE2_REG0, QUEUE_NUM, 0, 4) 284 FIELD(SCREENING_TYPE2_REG0, VLAN_PRIORITY, 4, 3) 285 FIELD(SCREENING_TYPE2_REG0, VLAN_ENABLE, 8, 1) 286 FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_REG_INDEX, 9, 3) 287 FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE, 12, 1) 288 FIELD(SCREENING_TYPE2_REG0, COMPARE_A, 13, 5) 289 FIELD(SCREENING_TYPE2_REG0, COMPARE_A_ENABLE, 18, 1) 290 FIELD(SCREENING_TYPE2_REG0, COMPARE_B, 19, 5) 291 FIELD(SCREENING_TYPE2_REG0, COMPARE_B_ENABLE, 24, 1) 292 FIELD(SCREENING_TYPE2_REG0, COMPARE_C, 25, 5) 293 FIELD(SCREENING_TYPE2_REG0, COMPARE_C_ENABLE, 30, 1) 294 FIELD(SCREENING_TYPE2_REG0, DROP_ON_MATCH, 31, 1) 295 296 REG32(SCREENING_TYPE2_ETHERTYPE_REG0, 0x6e0) 297 298 REG32(TYPE2_COMPARE_0_WORD_0, 0x700) 299 FIELD(TYPE2_COMPARE_0_WORD_0, MASK_VALUE, 0, 16) 300 FIELD(TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE, 16, 16) 301 302 REG32(TYPE2_COMPARE_0_WORD_1, 0x704) 303 FIELD(TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE, 0, 7) 304 FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET, 7, 2) 305 FIELD(TYPE2_COMPARE_0_WORD_1, DISABLE_MASK, 9, 1) 306 FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_VLAN_ID, 10, 1) 307 308 /*****************************************/ 309 310 311 /* GEM_ISR GEM_IER GEM_IDR GEM_IMR */ 312 #define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */ 313 #define GEM_INT_AMBA_ERR 0x00000040 314 #define GEM_INT_TXUSED 0x00000008 315 #define 
GEM_INT_RXUSED 0x00000004 316 #define GEM_INT_RXCMPL 0x00000002 317 318 #define GEM_PHYMNTNC_OP_R 0x20000000 /* read operation */ 319 #define GEM_PHYMNTNC_OP_W 0x10000000 /* write operation */ 320 #define GEM_PHYMNTNC_ADDR 0x0F800000 /* Address bits */ 321 #define GEM_PHYMNTNC_ADDR_SHFT 23 322 #define GEM_PHYMNTNC_REG 0x007C0000 /* register bits */ 323 #define GEM_PHYMNTNC_REG_SHIFT 18 324 325 /* Marvell PHY definitions */ 326 #define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */ 327 328 #define PHY_REG_CONTROL 0 329 #define PHY_REG_STATUS 1 330 #define PHY_REG_PHYID1 2 331 #define PHY_REG_PHYID2 3 332 #define PHY_REG_ANEGADV 4 333 #define PHY_REG_LINKPABIL 5 334 #define PHY_REG_ANEGEXP 6 335 #define PHY_REG_NEXTP 7 336 #define PHY_REG_LINKPNEXTP 8 337 #define PHY_REG_100BTCTRL 9 338 #define PHY_REG_1000BTSTAT 10 339 #define PHY_REG_EXTSTAT 15 340 #define PHY_REG_PHYSPCFC_CTL 16 341 #define PHY_REG_PHYSPCFC_ST 17 342 #define PHY_REG_INT_EN 18 343 #define PHY_REG_INT_ST 19 344 #define PHY_REG_EXT_PHYSPCFC_CTL 20 345 #define PHY_REG_RXERR 21 346 #define PHY_REG_EACD 22 347 #define PHY_REG_LED 24 348 #define PHY_REG_LED_OVRD 25 349 #define PHY_REG_EXT_PHYSPCFC_CTL2 26 350 #define PHY_REG_EXT_PHYSPCFC_ST 27 351 #define PHY_REG_CABLE_DIAG 28 352 353 #define PHY_REG_CONTROL_RST 0x8000 354 #define PHY_REG_CONTROL_LOOP 0x4000 355 #define PHY_REG_CONTROL_ANEG 0x1000 356 #define PHY_REG_CONTROL_ANRESTART 0x0200 357 358 #define PHY_REG_STATUS_LINK 0x0004 359 #define PHY_REG_STATUS_ANEGCMPL 0x0020 360 361 #define PHY_REG_INT_ST_ANEGCMPL 0x0800 362 #define PHY_REG_INT_ST_LINKC 0x0400 363 #define PHY_REG_INT_ST_ENERGY 0x0010 364 365 /***********************************************************************/ 366 #define GEM_RX_REJECT (-1) 367 #define GEM_RX_PROMISCUOUS_ACCEPT (-2) 368 #define GEM_RX_BROADCAST_ACCEPT (-3) 369 #define GEM_RX_MULTICAST_HASH_ACCEPT (-4) 370 #define GEM_RX_UNICAST_HASH_ACCEPT (-5) 371 372 #define GEM_RX_SAR_ACCEPT 0 373 374 
/***********************************************************************/ 375 376 #define DESC_1_USED 0x80000000 377 #define DESC_1_LENGTH 0x00001FFF 378 379 #define DESC_1_TX_WRAP 0x40000000 380 #define DESC_1_TX_LAST 0x00008000 381 382 #define DESC_0_RX_WRAP 0x00000002 383 #define DESC_0_RX_OWNERSHIP 0x00000001 384 385 #define R_DESC_1_RX_SAR_SHIFT 25 386 #define R_DESC_1_RX_SAR_LENGTH 2 387 #define R_DESC_1_RX_SAR_MATCH (1 << 27) 388 #define R_DESC_1_RX_UNICAST_HASH (1 << 29) 389 #define R_DESC_1_RX_MULTICAST_HASH (1 << 30) 390 #define R_DESC_1_RX_BROADCAST (1 << 31) 391 392 #define DESC_1_RX_SOF 0x00004000 393 #define DESC_1_RX_EOF 0x00008000 394 395 #define GEM_MODID_VALUE 0x00020118 396 397 static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) 398 { 399 uint64_t ret = desc[0]; 400 401 if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { 402 ret |= (uint64_t)desc[2] << 32; 403 } 404 return ret; 405 } 406 407 static inline unsigned tx_desc_get_used(uint32_t *desc) 408 { 409 return (desc[1] & DESC_1_USED) ? 1 : 0; 410 } 411 412 static inline void tx_desc_set_used(uint32_t *desc) 413 { 414 desc[1] |= DESC_1_USED; 415 } 416 417 static inline unsigned tx_desc_get_wrap(uint32_t *desc) 418 { 419 return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0; 420 } 421 422 static inline unsigned tx_desc_get_last(uint32_t *desc) 423 { 424 return (desc[1] & DESC_1_TX_LAST) ? 
1 : 0; 425 } 426 427 static inline unsigned tx_desc_get_length(uint32_t *desc) 428 { 429 return desc[1] & DESC_1_LENGTH; 430 } 431 432 static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue) 433 { 434 DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue); 435 DB_PRINT("bufaddr: 0x%08x\n", *desc); 436 DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc)); 437 DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc)); 438 DB_PRINT("last: %d\n", tx_desc_get_last(desc)); 439 DB_PRINT("length: %d\n", tx_desc_get_length(desc)); 440 } 441 442 static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) 443 { 444 uint64_t ret = desc[0] & ~0x3UL; 445 446 if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { 447 ret |= (uint64_t)desc[2] << 32; 448 } 449 return ret; 450 } 451 452 static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx) 453 { 454 int ret = 2; 455 456 if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { 457 ret += 2; 458 } 459 if (s->regs[R_DMACFG] & (rx_n_tx ? R_DMACFG_RX_BD_EXT_MODE_EN_MASK 460 : R_DMACFG_TX_BD_EXT_MODE_EN_MASK)) { 461 ret += 2; 462 } 463 464 assert(ret <= DESC_MAX_NUM_WORDS); 465 return ret; 466 } 467 468 static inline unsigned rx_desc_get_wrap(uint32_t *desc) 469 { 470 return desc[0] & DESC_0_RX_WRAP ? 1 : 0; 471 } 472 473 static inline unsigned rx_desc_get_ownership(uint32_t *desc) 474 { 475 return desc[0] & DESC_0_RX_OWNERSHIP ? 
1 : 0; 476 } 477 478 static inline void rx_desc_set_ownership(uint32_t *desc) 479 { 480 desc[0] |= DESC_0_RX_OWNERSHIP; 481 } 482 483 static inline void rx_desc_set_sof(uint32_t *desc) 484 { 485 desc[1] |= DESC_1_RX_SOF; 486 } 487 488 static inline void rx_desc_clear_control(uint32_t *desc) 489 { 490 desc[1] = 0; 491 } 492 493 static inline void rx_desc_set_eof(uint32_t *desc) 494 { 495 desc[1] |= DESC_1_RX_EOF; 496 } 497 498 static inline void rx_desc_set_length(uint32_t *desc, unsigned len) 499 { 500 desc[1] &= ~DESC_1_LENGTH; 501 desc[1] |= len; 502 } 503 504 static inline void rx_desc_set_broadcast(uint32_t *desc) 505 { 506 desc[1] |= R_DESC_1_RX_BROADCAST; 507 } 508 509 static inline void rx_desc_set_unicast_hash(uint32_t *desc) 510 { 511 desc[1] |= R_DESC_1_RX_UNICAST_HASH; 512 } 513 514 static inline void rx_desc_set_multicast_hash(uint32_t *desc) 515 { 516 desc[1] |= R_DESC_1_RX_MULTICAST_HASH; 517 } 518 519 static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx) 520 { 521 desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH, 522 sar_idx); 523 desc[1] |= R_DESC_1_RX_SAR_MATCH; 524 } 525 526 /* The broadcast MAC address: 0xFFFFFFFFFFFF */ 527 static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 528 529 static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx) 530 { 531 uint32_t size; 532 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, JUMBO_FRAMES)) { 533 size = s->regs[R_JUMBO_MAX_LEN]; 534 if (size > s->jumbo_max_len) { 535 size = s->jumbo_max_len; 536 qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be" 537 " greater than 0x%" PRIx32 "\n", s->jumbo_max_len); 538 } 539 } else if (tx) { 540 size = 1518; 541 } else { 542 size = FIELD_EX32(s->regs[R_NWCFG], 543 NWCFG, RECV_1536_BYTE_FRAMES) ? 
1538 : 1518; 544 } 545 return size; 546 } 547 548 static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag) 549 { 550 if (q == 0) { 551 s->regs[R_ISR] |= flag & ~(s->regs[R_IMR]); 552 } else { 553 s->regs[R_INT_Q1_STATUS + q - 1] |= flag & 554 ~(s->regs[R_INT_Q1_MASK + q - 1]); 555 } 556 } 557 558 /* 559 * gem_init_register_masks: 560 * One time initialization. 561 * Set masks to identify which register bits have magical clear properties 562 */ 563 static void gem_init_register_masks(CadenceGEMState *s) 564 { 565 unsigned int i; 566 /* Mask of register bits which are read only */ 567 memset(&s->regs_ro[0], 0, sizeof(s->regs_ro)); 568 s->regs_ro[R_NWCTRL] = 0xFFF80000; 569 s->regs_ro[R_NWSTATUS] = 0xFFFFFFFF; 570 s->regs_ro[R_DMACFG] = 0x8E00F000; 571 s->regs_ro[R_TXSTATUS] = 0xFFFFFE08; 572 s->regs_ro[R_RXQBASE] = 0x00000003; 573 s->regs_ro[R_TXQBASE] = 0x00000003; 574 s->regs_ro[R_RXSTATUS] = 0xFFFFFFF0; 575 s->regs_ro[R_ISR] = 0xFFFFFFFF; 576 s->regs_ro[R_IMR] = 0xFFFFFFFF; 577 s->regs_ro[R_MODID] = 0xFFFFFFFF; 578 for (i = 0; i < s->num_priority_queues; i++) { 579 s->regs_ro[R_INT_Q1_STATUS + i] = 0xFFFFFFFF; 580 s->regs_ro[R_INT_Q1_ENABLE + i] = 0xFFFFF319; 581 s->regs_ro[R_INT_Q1_DISABLE + i] = 0xFFFFF319; 582 s->regs_ro[R_INT_Q1_MASK + i] = 0xFFFFFFFF; 583 } 584 585 /* Mask of register bits which are clear on read */ 586 memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc)); 587 s->regs_rtc[R_ISR] = 0xFFFFFFFF; 588 for (i = 0; i < s->num_priority_queues; i++) { 589 s->regs_rtc[R_INT_Q1_STATUS + i] = 0x00000CE6; 590 } 591 592 /* Mask of register bits which are write 1 to clear */ 593 memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c)); 594 s->regs_w1c[R_TXSTATUS] = 0x000001F7; 595 s->regs_w1c[R_RXSTATUS] = 0x0000000F; 596 597 /* Mask of register bits which are write only */ 598 memset(&s->regs_wo[0], 0, sizeof(s->regs_wo)); 599 s->regs_wo[R_NWCTRL] = 0x00073E60; 600 s->regs_wo[R_IER] = 0x07FFFFFF; 601 s->regs_wo[R_IDR] = 0x07FFFFFF; 602 for (i = 0; i < 
s->num_priority_queues; i++) { 603 s->regs_wo[R_INT_Q1_ENABLE + i] = 0x00000CE6; 604 s->regs_wo[R_INT_Q1_DISABLE + i] = 0x00000CE6; 605 } 606 } 607 608 /* 609 * phy_update_link: 610 * Make the emulated PHY link state match the QEMU "interface" state. 611 */ 612 static void phy_update_link(CadenceGEMState *s) 613 { 614 DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down); 615 616 /* Autonegotiation status mirrors link status. */ 617 if (qemu_get_queue(s->nic)->link_down) { 618 s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL | 619 PHY_REG_STATUS_LINK); 620 s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC; 621 } else { 622 s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL | 623 PHY_REG_STATUS_LINK); 624 s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC | 625 PHY_REG_INT_ST_ANEGCMPL | 626 PHY_REG_INT_ST_ENERGY); 627 } 628 } 629 630 static bool gem_can_receive(NetClientState *nc) 631 { 632 CadenceGEMState *s; 633 int i; 634 635 s = qemu_get_nic_opaque(nc); 636 637 /* Do nothing if receive is not enabled. */ 638 if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_RECEIVE)) { 639 if (s->can_rx_state != 1) { 640 s->can_rx_state = 1; 641 DB_PRINT("can't receive - no enable\n"); 642 } 643 return false; 644 } 645 646 for (i = 0; i < s->num_priority_queues; i++) { 647 if (rx_desc_get_ownership(s->rx_desc[i]) != 1) { 648 break; 649 } 650 }; 651 652 if (i == s->num_priority_queues) { 653 if (s->can_rx_state != 2) { 654 s->can_rx_state = 2; 655 DB_PRINT("can't receive - all the buffer descriptors are busy\n"); 656 } 657 return false; 658 } 659 660 if (s->can_rx_state != 0) { 661 s->can_rx_state = 0; 662 DB_PRINT("can receive\n"); 663 } 664 return true; 665 } 666 667 /* 668 * gem_update_int_status: 669 * Raise or lower interrupt based on current status. 
670 */ 671 static void gem_update_int_status(CadenceGEMState *s) 672 { 673 int i; 674 675 qemu_set_irq(s->irq[0], !!s->regs[R_ISR]); 676 677 for (i = 1; i < s->num_priority_queues; ++i) { 678 qemu_set_irq(s->irq[i], !!s->regs[R_INT_Q1_STATUS + i - 1]); 679 } 680 } 681 682 /* 683 * gem_receive_updatestats: 684 * Increment receive statistics. 685 */ 686 static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet, 687 unsigned bytes) 688 { 689 uint64_t octets; 690 691 /* Total octets (bytes) received */ 692 octets = ((uint64_t)(s->regs[R_OCTRXLO]) << 32) | 693 s->regs[R_OCTRXHI]; 694 octets += bytes; 695 s->regs[R_OCTRXLO] = octets >> 32; 696 s->regs[R_OCTRXHI] = octets; 697 698 /* Error-free Frames received */ 699 s->regs[R_RXCNT]++; 700 701 /* Error-free Broadcast Frames counter */ 702 if (!memcmp(packet, broadcast_addr, 6)) { 703 s->regs[R_RXBROADCNT]++; 704 } 705 706 /* Error-free Multicast Frames counter */ 707 if (packet[0] == 0x01) { 708 s->regs[R_RXMULTICNT]++; 709 } 710 711 if (bytes <= 64) { 712 s->regs[R_RX64CNT]++; 713 } else if (bytes <= 127) { 714 s->regs[R_RX65CNT]++; 715 } else if (bytes <= 255) { 716 s->regs[R_RX128CNT]++; 717 } else if (bytes <= 511) { 718 s->regs[R_RX256CNT]++; 719 } else if (bytes <= 1023) { 720 s->regs[R_RX512CNT]++; 721 } else if (bytes <= 1518) { 722 s->regs[R_RX1024CNT]++; 723 } else { 724 s->regs[R_RX1519CNT]++; 725 } 726 } 727 728 /* 729 * Get the MAC Address bit from the specified position 730 */ 731 static unsigned get_bit(const uint8_t *mac, unsigned bit) 732 { 733 unsigned byte; 734 735 byte = mac[bit / 8]; 736 byte >>= (bit & 0x7); 737 byte &= 1; 738 739 return byte; 740 } 741 742 /* 743 * Calculate a GEM MAC Address hash index 744 */ 745 static unsigned calc_mac_hash(const uint8_t *mac) 746 { 747 int index_bit, mac_bit; 748 unsigned hash_index; 749 750 hash_index = 0; 751 mac_bit = 5; 752 for (index_bit = 5; index_bit >= 0; index_bit--) { 753 hash_index |= (get_bit(mac, mac_bit) ^ 754 get_bit(mac, 
mac_bit + 6) ^ 755 get_bit(mac, mac_bit + 12) ^ 756 get_bit(mac, mac_bit + 18) ^ 757 get_bit(mac, mac_bit + 24) ^ 758 get_bit(mac, mac_bit + 30) ^ 759 get_bit(mac, mac_bit + 36) ^ 760 get_bit(mac, mac_bit + 42)) << index_bit; 761 mac_bit--; 762 } 763 764 return hash_index; 765 } 766 767 /* 768 * gem_mac_address_filter: 769 * Accept or reject this destination address? 770 * Returns: 771 * GEM_RX_REJECT: reject 772 * >= 0: Specific address accept (which matched SAR is returned) 773 * others for various other modes of accept: 774 * GEM_RM_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT, 775 * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT 776 */ 777 static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet) 778 { 779 uint8_t *gem_spaddr; 780 int i, is_mc; 781 782 /* Promiscuous mode? */ 783 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC)) { 784 return GEM_RX_PROMISCUOUS_ACCEPT; 785 } 786 787 if (!memcmp(packet, broadcast_addr, 6)) { 788 /* Reject broadcast packets? */ 789 if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, NO_BROADCAST)) { 790 return GEM_RX_REJECT; 791 } 792 return GEM_RX_BROADCAST_ACCEPT; 793 } 794 795 /* Accept packets -w- hash match? */ 796 is_mc = is_multicast_ether_addr(packet); 797 if ((is_mc && (FIELD_EX32(s->regs[R_NWCFG], NWCFG, MULTICAST_HASH_EN))) || 798 (!is_mc && FIELD_EX32(s->regs[R_NWCFG], NWCFG, UNICAST_HASH_EN))) { 799 uint64_t buckets; 800 unsigned hash_index; 801 802 hash_index = calc_mac_hash(packet); 803 buckets = ((uint64_t)s->regs[R_HASHHI] << 32) | s->regs[R_HASHLO]; 804 if ((buckets >> hash_index) & 1) { 805 return is_mc ? 
GEM_RX_MULTICAST_HASH_ACCEPT 806 : GEM_RX_UNICAST_HASH_ACCEPT; 807 } 808 } 809 810 /* Check all 4 specific addresses */ 811 gem_spaddr = (uint8_t *)&(s->regs[R_SPADDR1LO]); 812 for (i = 3; i >= 0; i--) { 813 if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) { 814 return GEM_RX_SAR_ACCEPT + i; 815 } 816 } 817 818 /* No address match; reject the packet */ 819 return GEM_RX_REJECT; 820 } 821 822 /* Figure out which queue the received data should be sent to */ 823 static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, 824 unsigned rxbufsize) 825 { 826 uint32_t reg; 827 bool matched, mismatched; 828 int i, j; 829 830 for (i = 0; i < s->num_type1_screeners; i++) { 831 reg = s->regs[R_SCREENING_TYPE1_REG0 + i]; 832 matched = false; 833 mismatched = false; 834 835 /* Screening is based on UDP Port */ 836 if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN)) { 837 uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23]; 838 if (udp_port == FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH)) { 839 matched = true; 840 } else { 841 mismatched = true; 842 } 843 } 844 845 /* Screening is based on DS/TC */ 846 if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_ENABLE)) { 847 uint8_t dscp = rxbuf_ptr[14 + 1]; 848 if (dscp == FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_MATCH)) { 849 matched = true; 850 } else { 851 mismatched = true; 852 } 853 } 854 855 if (matched && !mismatched) { 856 return FIELD_EX32(reg, SCREENING_TYPE1_REG0, QUEUE_NUM); 857 } 858 } 859 860 for (i = 0; i < s->num_type2_screeners; i++) { 861 reg = s->regs[R_SCREENING_TYPE2_REG0 + i]; 862 matched = false; 863 mismatched = false; 864 865 if (FIELD_EX32(reg, SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE)) { 866 uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13]; 867 int et_idx = FIELD_EX32(reg, SCREENING_TYPE2_REG0, 868 ETHERTYPE_REG_INDEX); 869 870 if (et_idx > s->num_type2_screeners) { 871 qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype " 872 "register index: %d\n", 
et_idx); 873 } 874 if (type == s->regs[R_SCREENING_TYPE2_ETHERTYPE_REG0 + 875 et_idx]) { 876 matched = true; 877 } else { 878 mismatched = true; 879 } 880 } 881 882 /* Compare A, B, C */ 883 for (j = 0; j < 3; j++) { 884 uint32_t cr0, cr1, mask, compare; 885 uint16_t rx_cmp; 886 int offset; 887 int cr_idx = extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT + j * 6, 888 R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH); 889 890 if (!extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_SHIFT + j * 6, 891 R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_LENGTH)) { 892 continue; 893 } 894 895 if (cr_idx > s->num_type2_screeners) { 896 qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare " 897 "register index: %d\n", cr_idx); 898 } 899 900 cr0 = s->regs[R_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2]; 901 cr1 = s->regs[R_TYPE2_COMPARE_0_WORD_1 + cr_idx * 2]; 902 offset = FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE); 903 904 switch (FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET)) { 905 case 3: /* Skip UDP header */ 906 qemu_log_mask(LOG_UNIMP, "TCP compare offsets" 907 "unimplemented - assuming UDP\n"); 908 offset += 8; 909 /* Fallthrough */ 910 case 2: /* skip the IP header */ 911 offset += 20; 912 /* Fallthrough */ 913 case 1: /* Count from after the ethertype */ 914 offset += 14; 915 break; 916 case 0: 917 /* Offset from start of frame */ 918 break; 919 } 920 921 rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset]; 922 mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE); 923 compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE); 924 925 if ((rx_cmp & mask) == (compare & mask)) { 926 matched = true; 927 } else { 928 mismatched = true; 929 } 930 } 931 932 if (matched && !mismatched) { 933 return FIELD_EX32(reg, SCREENING_TYPE2_REG0, QUEUE_NUM); 934 } 935 } 936 937 /* We made it here, assume it's queue 0 */ 938 return 0; 939 } 940 941 static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q) 942 { 943 uint32_t base_addr = 0; 944 945 
switch (q) { 946 case 0: 947 base_addr = s->regs[tx ? R_TXQBASE : R_RXQBASE]; 948 break; 949 case 1 ... (MAX_PRIORITY_QUEUES - 1): 950 base_addr = s->regs[(tx ? R_TRANSMIT_Q1_PTR : 951 R_RECEIVE_Q1_PTR) + q - 1]; 952 break; 953 default: 954 g_assert_not_reached(); 955 }; 956 957 return base_addr; 958 } 959 960 static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q) 961 { 962 return gem_get_queue_base_addr(s, true, q); 963 } 964 965 static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q) 966 { 967 return gem_get_queue_base_addr(s, false, q); 968 } 969 970 static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q) 971 { 972 hwaddr desc_addr = 0; 973 974 if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) { 975 desc_addr = s->regs[tx ? R_TBQPH : R_RBQPH]; 976 } 977 desc_addr <<= 32; 978 desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q]; 979 return desc_addr; 980 } 981 982 static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q) 983 { 984 return gem_get_desc_addr(s, true, q); 985 } 986 987 static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q) 988 { 989 return gem_get_desc_addr(s, false, q); 990 } 991 992 static void gem_get_rx_desc(CadenceGEMState *s, int q) 993 { 994 hwaddr desc_addr = gem_get_rx_desc_addr(s, q); 995 996 DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr); 997 998 /* read current descriptor */ 999 address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, 1000 s->rx_desc[q], 1001 sizeof(uint32_t) * gem_get_desc_len(s, true)); 1002 1003 /* Descriptor owned by software ? 
     */
    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
        DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
        s->regs[R_RXSTATUS] |= R_RXSTATUS_BUF_NOT_AVAILABLE_MASK;
        gem_set_isr(s, q, GEM_INT_RXUSED);
        /* Handle interrupt consequences */
        gem_update_int_status(s);
    }
}

/*
 * gem_receive:
 * Fit a packet handed to us by QEMU into the receive descriptor ring.
 *
 * Returns @size (possibly adjusted for padding/FCS) on success, or -1
 * when the packet is discarded (length error, frame too long, or
 * receive not enabled / no descriptor available mid-copy).
 */
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);
    unsigned rxbufsize, bytes_to_copy;
    unsigned rxbuf_offset;
    uint8_t *rxbuf_ptr;
    bool first_desc = true;
    int maf;
    int q = 0;

    /* Is this destination MAC address "for us" ? */
    maf = gem_mac_address_filter(s, buf);
    if (maf == GEM_RX_REJECT) {
        return size; /* no, drop silently b/c it's not an error */
    }

    /* Discard packets with receive length error enabled ? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, LEN_ERR_DISCARD)) {
        unsigned type_len;

        /* Fish the ethertype / length field out of the RX packet */
        type_len = buf[12] << 8 | buf[13];
        /* It is a length field, not an ethertype */
        if (type_len < 0x600) {
            if (size < type_len) {
                /* discard */
                return -1;
            }
        }
    }

    /*
     * Determine configured receive buffer offset (probably 0)
     */
    rxbuf_offset = FIELD_EX32(s->regs[R_NWCFG], NWCFG, RECV_BUF_OFFSET);

    /* The configure size of each receive buffer.  Determines how many
     * buffers needed to hold this packet.
     */
    rxbufsize = FIELD_EX32(s->regs[R_DMACFG], DMACFG, RX_BUF_SIZE);
    rxbufsize *= GEM_DMACFG_RBUFSZ_MUL;

    bytes_to_copy = size;

    /* Hardware allows a zero value here but warns against it. To avoid QEMU
     * indefinite loops we enforce a minimum value here
     */
    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
    }

    /* Pad to minimum length. Assume FCS field is stripped, logic
     * below will increment it to the real minimum of 64 when
     * not FCS stripping
     */
    if (size < 60) {
        size = 60;
    }

    /* Strip of FCS field ? (usually yes) */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, FCS_REMOVE)) {
        /* Packet is used as-is; cast away const, buffer is not written */
        rxbuf_ptr = (void *)buf;
    } else {
        unsigned crc_val;

        /* Clamp so the appended 4-byte FCS still fits in rx_packet */
        if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
            size = MAX_FRAME_SIZE - sizeof(crc_val);
        }
        bytes_to_copy = size;
        /* The application wants the FCS field, which QEMU does not provide.
         * We must try and calculate one.
         */

        memcpy(s->rx_packet, buf, size);
        memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
        rxbuf_ptr = s->rx_packet;
        crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
        memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));

        bytes_to_copy += 4;
        size += 4;
    }

    DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);

    /* Find which queue we are targeting */
    /*
     * NOTE(review): the screener is handed rxbufsize (the configured
     * DMA buffer size), not the received frame length - confirm this
     * is intentional.
     */
    q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);

    if (size > gem_get_max_buf_len(s, false)) {
        qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
        gem_set_isr(s, q, GEM_INT_AMBA_ERR);
        return -1;
    }

    /* Scatter the frame across as many RX buffers as needed */
    while (bytes_to_copy) {
        hwaddr desc_addr;

        /* Do nothing if receive is not enabled. */
        if (!gem_can_receive(nc)) {
            return -1;
        }

        DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
                 MIN(bytes_to_copy, rxbufsize),
                 rx_desc_get_buffer(s, s->rx_desc[q]));

        /* Copy packet data to emulated DMA buffer */
        address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
                            rxbuf_offset,
                            MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
                            MIN(bytes_to_copy, rxbufsize));
        rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
        bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);

        rx_desc_clear_control(s->rx_desc[q]);

        /* Update the descriptor.  */
        if (first_desc) {
            rx_desc_set_sof(s->rx_desc[q]);
            first_desc = false;
        }
        if (bytes_to_copy == 0) {
            rx_desc_set_eof(s->rx_desc[q]);
            rx_desc_set_length(s->rx_desc[q], size);
        }
        rx_desc_set_ownership(s->rx_desc[q]);

        /* Record how the MAC filter accepted this frame */
        switch (maf) {
        case GEM_RX_PROMISCUOUS_ACCEPT:
            break;
        case GEM_RX_BROADCAST_ACCEPT:
            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
        case GEM_RX_UNICAST_HASH_ACCEPT:
            rx_desc_set_unicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_MULTICAST_HASH_ACCEPT:
            rx_desc_set_multicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_REJECT:
            abort();
        default: /* SAR */
            rx_desc_set_sar(s->rx_desc[q], maf);
        }

        /* Descriptor write-back.  */
        desc_addr = gem_get_rx_desc_addr(s, q);
        address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            s->rx_desc[q],
                            sizeof(uint32_t) * gem_get_desc_len(s, true));

        /* Next descriptor */
        if (rx_desc_get_wrap(s->rx_desc[q])) {
            DB_PRINT("wrapping RX descriptor list\n");
            s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
        } else {
            DB_PRINT("incrementing RX descriptor list\n");
            s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
        }

        gem_get_rx_desc(s, q);
    }

    /* Count it */
    gem_receive_updatestats(s, buf, size);

    s->regs[R_RXSTATUS] |= R_RXSTATUS_FRAME_RECEIVED_MASK;
    gem_set_isr(s, q, GEM_INT_RXCMPL);

    /* Handle interrupt consequences */
    gem_update_int_status(s);

    return size;
}

/*
 * gem_transmit_updatestats:
 * Increment transmit statistics.
 */
static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                     unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) transmitted */
    /*
     * NOTE(review): R_OCTTXLO is loaded with the *high* 32 bits here
     * and R_OCTTXHI with the low bits - looks swapped relative to the
     * register names; confirm against the register map.
     */
    octets = ((uint64_t)(s->regs[R_OCTTXLO]) << 32) |
             s->regs[R_OCTTXHI];
    octets += bytes;
    s->regs[R_OCTTXLO] = octets >> 32;
    s->regs[R_OCTTXHI] = octets;

    /* Error-free Frames transmitted */
    s->regs[R_TXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_TXBCNT]++;
    }

    /* Error-free Multicast Frames counter */
    /* Multicast is indicated by the low bit of the first address octet */
    if (packet[0] == 0x01) {
        s->regs[R_TXMCNT]++;
    }

    /* Size-binned frame counters */
    if (bytes <= 64) {
        s->regs[R_TX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_TX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_TX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_TX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_TX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_TX1024CNT]++;
    } else {
        s->regs[R_TX1519CNT]++;
    }
}

/*
 * gem_transmit:
 * Fish packets out of the descriptor ring and feed them to QEMU
 */
static void gem_transmit(CadenceGEMState *s)
{
    uint32_t desc[DESC_MAX_NUM_WORDS];
    hwaddr packet_desc_addr;
    uint8_t *p;
    unsigned total_bytes;
    int q = 0;

    /* Do nothing if transmit is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
        return;
    }

    DB_PRINT("\n");

    /* The packet we will hand off to QEMU.
     * Packets scattered across multiple descriptors are gathered to this
     * one contiguous buffer first.
     */
    p = s->tx_packet;
    total_bytes = 0;

    /* Queues are serviced from the highest index down to queue 0 */
    for (q = s->num_priority_queues - 1; q >= 0; q--) {
        /* read current descriptor */
        packet_desc_addr = gem_get_tx_desc_addr(s, q);

        DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
        address_space_read(&s->dma_as, packet_desc_addr,
                           MEMTXATTRS_UNSPECIFIED, desc,
                           sizeof(uint32_t) * gem_get_desc_len(s, false));
        /* Handle all descriptors owned by hardware */
        while (tx_desc_get_used(desc) == 0) {

            /* Do nothing if transmit is not enabled. */
            if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
                return;
            }
            print_gem_tx_desc(desc, q);

            /* The real hardware would eat this (and possibly crash).
             * For QEMU let's lend a helping hand.
             */
            if ((tx_desc_get_buffer(s, desc) == 0) ||
                (tx_desc_get_length(desc) == 0)) {
                DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
                         packet_desc_addr);
                break;
            }

            /* Reject fragments that would overflow the gather buffer */
            if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
                                               (p - s->tx_packet)) {
                qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
                         HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
                         packet_desc_addr, tx_desc_get_length(desc),
                         gem_get_max_buf_len(s, true) - (p - s->tx_packet));
                gem_set_isr(s, q, GEM_INT_AMBA_ERR);
                break;
            }

            /* Gather this fragment of the packet from "dma memory" to our
             * contig buffer.
             */
            address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
                               MEMTXATTRS_UNSPECIFIED,
                               p, tx_desc_get_length(desc));
            p += tx_desc_get_length(desc);
            total_bytes += tx_desc_get_length(desc);

            /* Last descriptor for this packet; hand the whole thing off */
            if (tx_desc_get_last(desc)) {
                uint32_t desc_first[DESC_MAX_NUM_WORDS];
                hwaddr desc_addr = gem_get_tx_desc_addr(s, q);

                /* Modify the 1st descriptor of this packet to be owned by
                 * the processor.
                 */
                /*
                 * NOTE(review): sizeof(desc_first) reads/writes
                 * DESC_MAX_NUM_WORDS words regardless of the
                 * configured descriptor length - confirm this cannot
                 * clobber the following descriptor.
                 */
                address_space_read(&s->dma_as, desc_addr,
                                   MEMTXATTRS_UNSPECIFIED, desc_first,
                                   sizeof(desc_first));
                tx_desc_set_used(desc_first);
                address_space_write(&s->dma_as, desc_addr,
                                    MEMTXATTRS_UNSPECIFIED, desc_first,
                                    sizeof(desc_first));
                /* Advance the hardware current descriptor past this packet */
                if (tx_desc_get_wrap(desc)) {
                    s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
                } else {
                    s->tx_desc_addr[q] = packet_desc_addr +
                                         4 * gem_get_desc_len(s, false);
                }
                DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);

                s->regs[R_TXSTATUS] |= R_TXSTATUS_TRANSMIT_COMPLETE_MASK;
                gem_set_isr(s, q, GEM_INT_TXCMPL);

                /* Handle interrupt consequences */
                gem_update_int_status(s);

                /* Is checksum offload enabled? */
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, TX_PBUF_CSUM_OFFLOAD)) {
                    net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL);
                }

                /* Update MAC statistics */
                gem_transmit_updatestats(s, s->tx_packet, total_bytes);

                /* Send the packet somewhere */
                /* PHY or local loopback feeds the frame straight back to RX */
                if (s->phy_loop || FIELD_EX32(s->regs[R_NWCTRL], NWCTRL,
                                              LOOPBACK_LOCAL)) {
                    qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
                                        total_bytes);
                } else {
                    qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
                                     total_bytes);
                }

                /* Prepare for next packet */
                p = s->tx_packet;
                total_bytes = 0;
            }

            /* read next descriptor */
            if (tx_desc_get_wrap(desc)) {
                /* On wrap, rebuild the 64-bit address from TBQPH + base */
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
                    packet_desc_addr = s->regs[R_TBQPH];
                    packet_desc_addr <<= 32;
                } else {
                    packet_desc_addr = 0;
                }
                packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
            } else {
                packet_desc_addr += 4 * gem_get_desc_len(s, false);
            }
            DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
            address_space_read(&s->dma_as, packet_desc_addr,
                               MEMTXATTRS_UNSPECIFIED, desc,
                               sizeof(uint32_t) * gem_get_desc_len(s, false));
        }

        /* Stopped on a software-owned descriptor: report "used bit read" */
        if (tx_desc_get_used(desc)) {
            s->regs[R_TXSTATUS] |= R_TXSTATUS_USED_BIT_READ_MASK;
            /* IRQ TXUSED is defined only for queue 0 */
            if (q == 0) {
                gem_set_isr(s, 0, GEM_INT_TXUSED);
            }
            gem_update_int_status(s);
        }
    }
}

/*
 * Reset the emulated PHY to its post-reset register values (a Marvell
 * 88E1118-style register layout, judging by the ID values) and
 * refresh link state.
 */
static void gem_phy_reset(CadenceGEMState *s)
{
    memset(&s->phy_regs[0], 0, sizeof(s->phy_regs));
    s->phy_regs[PHY_REG_CONTROL] = 0x1140;
    s->phy_regs[PHY_REG_STATUS] = 0x7969;
    s->phy_regs[PHY_REG_PHYID1] = 0x0141;
    s->phy_regs[PHY_REG_PHYID2] = 0x0CC2;
    s->phy_regs[PHY_REG_ANEGADV] = 0x01E1;
    s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1;
    s->phy_regs[PHY_REG_ANEGEXP] = 0x000F;
    s->phy_regs[PHY_REG_NEXTP] = 0x2001;
    s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6;
    s->phy_regs[PHY_REG_100BTCTRL] = 0x0300;
    s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00;
    s->phy_regs[PHY_REG_EXTSTAT] = 0x3000;
    s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078;
    s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60;
    s->phy_regs[PHY_REG_LED] = 0x4100;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B;

    phy_update_link(s);
}

/* Device-level reset: restore all GEM registers to hardware defaults */
static void gem_reset(DeviceState *d)
{
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

    /* Set post reset register values */
    memset(&s->regs[0], 0, sizeof(s->regs));
    s->regs[R_NWCFG] = 0x00080000;
    s->regs[R_NWSTATUS] = 0x00000006;
    s->regs[R_DMACFG] = 0x00020784;
    s->regs[R_IMR] = 0x07ffffff;
    s->regs[R_TXPAUSE] = 0x0000ffff;
    s->regs[R_TXPARTIALSF] = 0x000003ff;
    s->regs[R_RXPARTIALSF] = 0x000003ff;
    s->regs[R_MODID] = s->revision;
    s->regs[R_DESCONF] = 0x02D00111;
    s->regs[R_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
    s->regs[R_DESCONF5] = 0x002f2045;
    s->regs[R_DESCONF6] = GEM_DESCONF6_64B_MASK;
    s->regs[R_INT_Q1_MASK] = 0x00000CE6;
    s->regs[R_JUMBO_MAX_LEN] = s->jumbo_max_len;

    /* Advertise the configured extra priority queues in DESCONF6 */
    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[R_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
    s->regs[R_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
    s->regs[R_SPADDR1HI] = a[4] | (a[5] << 8);

    for (i = 0; i < 4; i++) {
        s->sar_active[i] = false;
    }

    gem_phy_reset(s);

    gem_update_int_status(s);
}

/* Read emulated PHY register @reg_num (no side effects) */
static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
    return s->phy_regs[reg_num];
}

/*
 * Write emulated PHY register @reg_num.  Writes to the control
 * register model reset, instant autonegotiation completion and
 * loopback; all other registers are stored verbatim.
 */
static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val);

    switch (reg_num) {
    case PHY_REG_CONTROL:
        if (val & PHY_REG_CONTROL_RST) {
            /* Phy reset */
            gem_phy_reset(s);
            /* Self-clearing bits: reset and loopback drop immediately */
            val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP);
            s->phy_loop = 0;
        }
        if (val & PHY_REG_CONTROL_ANEG) {
            /* Complete autonegotiation immediately */
            val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART);
            s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL;
        }
        if (val & PHY_REG_CONTROL_LOOP) {
            DB_PRINT("PHY placed in loopback\n");
            s->phy_loop = 1;
        } else {
            s->phy_loop = 0;
        }
        break;
    }
    s->phy_regs[reg_num] = val;
}

/*
 * gem_read32:
 * Read a GEM register.
 */
static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
    CadenceGEMState *s;
    uint32_t retval;
    s = opaque;

    /* Byte offset -> register index; the MMIO region bounds offset */
    offset >>= 2;
    retval = s->regs[offset];

    DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset*4, retval);

    switch (offset) {
    case R_ISR:
        DB_PRINT("lowering irqs on ISR read\n");
        /* The interrupts get updated at the end of the function. */
        break;
    case R_PHYMNTNC:
        /* A pending MDIO read completes immediately on register read */
        if (retval & GEM_PHYMNTNC_OP_R) {
            uint32_t phy_addr, reg_num;

            phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
            if (phy_addr == s->phy_addr) {
                reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
                retval &= 0xFFFF0000;
                retval |= gem_phy_read(s, reg_num);
            } else {
                retval |= 0xFFFF; /* No device at this address */
            }
        }
        break;
    }

    /* Squash read to clear bits */
    s->regs[offset] &= ~(s->regs_rtc[offset]);

    /* Do not provide write only bits */
    retval &= ~(s->regs_wo[offset]);

    DB_PRINT("0x%08x\n", retval);
    gem_update_int_status(s);
    return retval;
}

/*
 * gem_write32:
 * Write a GEM register.
1546 */ 1547 static void gem_write(void *opaque, hwaddr offset, uint64_t val, 1548 unsigned size) 1549 { 1550 CadenceGEMState *s = (CadenceGEMState *)opaque; 1551 uint32_t readonly; 1552 int i; 1553 1554 DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val); 1555 offset >>= 2; 1556 1557 /* Squash bits which are read only in write value */ 1558 val &= ~(s->regs_ro[offset]); 1559 /* Preserve (only) bits which are read only and wtc in register */ 1560 readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]); 1561 1562 /* Copy register write to backing store */ 1563 s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly; 1564 1565 /* do w1c */ 1566 s->regs[offset] &= ~(s->regs_w1c[offset] & val); 1567 1568 /* Handle register write side effects */ 1569 switch (offset) { 1570 case R_NWCTRL: 1571 if (FIELD_EX32(val, NWCTRL, ENABLE_RECEIVE)) { 1572 for (i = 0; i < s->num_priority_queues; ++i) { 1573 gem_get_rx_desc(s, i); 1574 } 1575 } 1576 if (FIELD_EX32(val, NWCTRL, TRANSMIT_START)) { 1577 gem_transmit(s); 1578 } 1579 if (!(FIELD_EX32(val, NWCTRL, ENABLE_TRANSMIT))) { 1580 /* Reset to start of Q when transmit disabled. */ 1581 for (i = 0; i < s->num_priority_queues; i++) { 1582 s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i); 1583 } 1584 } 1585 if (gem_can_receive(qemu_get_queue(s->nic))) { 1586 qemu_flush_queued_packets(qemu_get_queue(s->nic)); 1587 } 1588 break; 1589 1590 case R_TXSTATUS: 1591 gem_update_int_status(s); 1592 break; 1593 case R_RXQBASE: 1594 s->rx_desc_addr[0] = val; 1595 break; 1596 case R_RECEIVE_Q1_PTR ... R_RECEIVE_Q7_PTR: 1597 s->rx_desc_addr[offset - R_RECEIVE_Q1_PTR + 1] = val; 1598 break; 1599 case R_TXQBASE: 1600 s->tx_desc_addr[0] = val; 1601 break; 1602 case R_TRANSMIT_Q1_PTR ... 
R_TRANSMIT_Q7_PTR: 1603 s->tx_desc_addr[offset - R_TRANSMIT_Q1_PTR + 1] = val; 1604 break; 1605 case R_RXSTATUS: 1606 gem_update_int_status(s); 1607 break; 1608 case R_IER: 1609 s->regs[R_IMR] &= ~val; 1610 gem_update_int_status(s); 1611 break; 1612 case R_JUMBO_MAX_LEN: 1613 s->regs[R_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK; 1614 break; 1615 case R_INT_Q1_ENABLE ... R_INT_Q7_ENABLE: 1616 s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_ENABLE] &= ~val; 1617 gem_update_int_status(s); 1618 break; 1619 case R_IDR: 1620 s->regs[R_IMR] |= val; 1621 gem_update_int_status(s); 1622 break; 1623 case R_INT_Q1_DISABLE ... R_INT_Q7_DISABLE: 1624 s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_DISABLE] |= val; 1625 gem_update_int_status(s); 1626 break; 1627 case R_SPADDR1LO: 1628 case R_SPADDR2LO: 1629 case R_SPADDR3LO: 1630 case R_SPADDR4LO: 1631 s->sar_active[(offset - R_SPADDR1LO) / 2] = false; 1632 break; 1633 case R_SPADDR1HI: 1634 case R_SPADDR2HI: 1635 case R_SPADDR3HI: 1636 case R_SPADDR4HI: 1637 s->sar_active[(offset - R_SPADDR1HI) / 2] = true; 1638 break; 1639 case R_PHYMNTNC: 1640 if (val & GEM_PHYMNTNC_OP_W) { 1641 uint32_t phy_addr, reg_num; 1642 1643 phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; 1644 if (phy_addr == s->phy_addr) { 1645 reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; 1646 gem_phy_write(s, reg_num, val); 1647 } 1648 } 1649 break; 1650 } 1651 1652 DB_PRINT("newval: 0x%08x\n", s->regs[offset]); 1653 } 1654 1655 static const MemoryRegionOps gem_ops = { 1656 .read = gem_read, 1657 .write = gem_write, 1658 .endianness = DEVICE_LITTLE_ENDIAN, 1659 }; 1660 1661 static void gem_set_link(NetClientState *nc) 1662 { 1663 CadenceGEMState *s = qemu_get_nic_opaque(nc); 1664 1665 DB_PRINT("\n"); 1666 phy_update_link(s); 1667 gem_update_int_status(s); 1668 } 1669 1670 static NetClientInfo net_gem_info = { 1671 .type = NET_CLIENT_DRIVER_NIC, 1672 .size = sizeof(NICState), 1673 .can_receive = gem_can_receive, 1674 .receive = gem_receive, 
1675 .link_status_changed = gem_set_link, 1676 }; 1677 1678 static void gem_realize(DeviceState *dev, Error **errp) 1679 { 1680 CadenceGEMState *s = CADENCE_GEM(dev); 1681 int i; 1682 1683 address_space_init(&s->dma_as, 1684 s->dma_mr ? s->dma_mr : get_system_memory(), "dma"); 1685 1686 if (s->num_priority_queues == 0 || 1687 s->num_priority_queues > MAX_PRIORITY_QUEUES) { 1688 error_setg(errp, "Invalid num-priority-queues value: %" PRIx8, 1689 s->num_priority_queues); 1690 return; 1691 } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) { 1692 error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8, 1693 s->num_type1_screeners); 1694 return; 1695 } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) { 1696 error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8, 1697 s->num_type2_screeners); 1698 return; 1699 } 1700 1701 for (i = 0; i < s->num_priority_queues; ++i) { 1702 sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); 1703 } 1704 1705 qemu_macaddr_default_if_unset(&s->conf.macaddr); 1706 1707 s->nic = qemu_new_nic(&net_gem_info, &s->conf, 1708 object_get_typename(OBJECT(dev)), dev->id, s); 1709 1710 if (s->jumbo_max_len > MAX_FRAME_SIZE) { 1711 error_setg(errp, "jumbo-max-len is greater than %d", 1712 MAX_FRAME_SIZE); 1713 return; 1714 } 1715 } 1716 1717 static void gem_init(Object *obj) 1718 { 1719 CadenceGEMState *s = CADENCE_GEM(obj); 1720 DeviceState *dev = DEVICE(obj); 1721 1722 DB_PRINT("\n"); 1723 1724 gem_init_register_masks(s); 1725 memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s, 1726 "enet", sizeof(s->regs)); 1727 1728 sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); 1729 } 1730 1731 static const VMStateDescription vmstate_cadence_gem = { 1732 .name = "cadence_gem", 1733 .version_id = 4, 1734 .minimum_version_id = 4, 1735 .fields = (VMStateField[]) { 1736 VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG), 1737 VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32), 1738 VMSTATE_UINT8(phy_loop, CadenceGEMState), 
1739 VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState, 1740 MAX_PRIORITY_QUEUES), 1741 VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState, 1742 MAX_PRIORITY_QUEUES), 1743 VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4), 1744 VMSTATE_END_OF_LIST(), 1745 } 1746 }; 1747 1748 static Property gem_properties[] = { 1749 DEFINE_NIC_PROPERTIES(CadenceGEMState, conf), 1750 DEFINE_PROP_UINT32("revision", CadenceGEMState, revision, 1751 GEM_MODID_VALUE), 1752 DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr, BOARD_PHY_ADDRESS), 1753 DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState, 1754 num_priority_queues, 1), 1755 DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState, 1756 num_type1_screeners, 4), 1757 DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState, 1758 num_type2_screeners, 4), 1759 DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState, 1760 jumbo_max_len, 10240), 1761 DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr, 1762 TYPE_MEMORY_REGION, MemoryRegion *), 1763 DEFINE_PROP_END_OF_LIST(), 1764 }; 1765 1766 static void gem_class_init(ObjectClass *klass, void *data) 1767 { 1768 DeviceClass *dc = DEVICE_CLASS(klass); 1769 1770 dc->realize = gem_realize; 1771 device_class_set_props(dc, gem_properties); 1772 dc->vmsd = &vmstate_cadence_gem; 1773 dc->reset = gem_reset; 1774 } 1775 1776 static const TypeInfo gem_info = { 1777 .name = TYPE_CADENCE_GEM, 1778 .parent = TYPE_SYS_BUS_DEVICE, 1779 .instance_size = sizeof(CadenceGEMState), 1780 .instance_init = gem_init, 1781 .class_init = gem_class_init, 1782 }; 1783 1784 static void gem_register_types(void) 1785 { 1786 type_register_static(&gem_info); 1787 } 1788 1789 type_init(gem_register_types) 1790